1
0
Fork 0

adapt dynamic core for OSX (adhere to 16 byte stack alignment, move link blocks to dynamic memory);

thanks to Mark Laws for his help and information


Imported-from: https://svn.code.sf.net/p/dosbox/code-0/dosbox/trunk@2704
This commit is contained in:
Sebastian Strohhäcker 2006-10-04 19:24:52 +00:00
parent 14d8f44e0e
commit 6448f2a398
4 changed files with 125 additions and 21 deletions

View file

@ -374,4 +374,8 @@ void CPU_Core_Dyn_X86_Cache_Init(bool enable_cache) {
cache_init(enable_cache);
}
/* Public shutdown hook for the dynamic x86 core's code cache.
 * Thin wrapper so cpu.cpp can trigger cache teardown without seeing the
 * cache internals; forwards to the file-local cache_close(). */
void CPU_Core_Dyn_X86_Cache_Close(void) {
cache_close();
}
#endif

View file

@ -42,12 +42,6 @@ static struct {
CodePageHandler * last_page;
} cache;
#if (C_HAVE_MPROTECT)
static Bit8u cache_code_link_blocks[2][16] GCC_ATTRIBUTE(aligned(PAGESIZE));
#else
static Bit8u cache_code_link_blocks[2][16];
#endif
static CacheBlock link_blocks[2];
class CodePageHandler :public PageHandler {
@ -396,19 +390,21 @@ static INLINE void cache_addd(Bit32u val) {
static void gen_return(BlockReturn retcode);
static Bit8u * cache_code_start_ptr=NULL;
static Bit8u * cache_code=NULL;
static Bit8u * cache_code_link_blocks=NULL;
static CacheBlock * cache_blocks=NULL;
/* Define temporary pagesize so the MPROTECT case and the regular case share as much code as possible */
#if (C_HAVE_MPROTECT)
#define PAGESIZE_TEMP PAGESIZE
#else
#define PAGESIZE_TEMP 1
#define PAGESIZE_TEMP 4096
#endif
static bool cache_initialized = false;
static void cache_init(bool enable) {
static bool cache_initialized = false;
Bits i;
if (enable) {
if (cache_initialized) return;
@ -424,16 +420,17 @@ static void cache_init(bool enable) {
cache_blocks[i].cache.next=&cache_blocks[i+1];
}
}
if (cache_code_start_ptr==NULL) {
cache_code_start_ptr=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP);
if(!cache_code_start_ptr) E_Exit("Allocating dynamic cache failed");
cache_code=(Bit8u*)(((int)cache_code_start_ptr + PAGESIZE_TEMP-1) & ~(PAGESIZE_TEMP-1)); //MEM LEAK. store old pointer if you want to free it.
cache_code_link_blocks=cache_code;
cache_code+=PAGESIZE_TEMP;
#if (C_HAVE_MPROTECT)
if(mprotect(cache_code_link_blocks,sizeof(cache_code_link_blocks),PROT_WRITE|PROT_READ|PROT_EXEC))
LOG_MSG("Setting excute permission on cache code link blocks has failed");
#endif
if (cache_code==NULL) {
cache_code=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1);
if(!cache_code) E_Exit("Allocating dynamic cache failed");
#if (C_HAVE_MPROTECT)
cache_code=(Bit8u*)(((int)cache_code + PAGESIZE-1) & ~(PAGESIZE-1)); //MEM LEAK. store old pointer if you want to free it.
if(mprotect(cache_code,CACHE_TOTAL+CACHE_MAXSIZE,PROT_WRITE|PROT_READ|PROT_EXEC))
if(mprotect(cache_code_link_blocks,CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP,PROT_WRITE|PROT_READ|PROT_EXEC))
LOG_MSG("Setting excute permission on the code cache has failed!");
#endif
CacheBlock * block=cache_getblock();
@ -444,10 +441,10 @@ static void cache_init(bool enable) {
block->cache.next=0; //Last block in the list
}
/* Setup the default blocks for block linkage returns */
cache.pos=&cache_code_link_blocks[0][0];
cache.pos=&cache_code_link_blocks[0];
link_blocks[0].cache.start=cache.pos;
gen_return(BR_Link1);
cache.pos=&cache_code_link_blocks[1][0];
cache.pos=&cache_code_link_blocks[32];
link_blocks[1].cache.start=cache.pos;
gen_return(BR_Link2);
cache.free_pages=0;
@ -461,3 +458,17 @@ static void cache_init(bool enable) {
}
}
}
/* Tear down the dynamic-core cache allocations.
 * NOTE(review): the whole body is intentionally commented out in this
 * revision, so this is currently a no-op and the cache memory is leaked
 * on shutdown (see the "MEM LEAK" note in cache_init). Presumably the
 * free() is disabled because generated code or link blocks may still be
 * referenced at shutdown time — TODO confirm before re-enabling. */
static void cache_close(void) {
/* if (cache_blocks != NULL) {
free(cache_blocks);
cache_blocks = NULL;
}
if (cache_code_start_ptr != NULL) {
free(cache_code_start_ptr);
cache_code_start_ptr = NULL;
}
cache_code = NULL;
cache_code_link_blocks = NULL;
cache_initialized = false; */
}

View file

@ -110,6 +110,22 @@ return_address:
pop ebx
mov [retval],eax
}
#elif defined (MACOSX)
register Bit32u tempflags=reg_flags & FMASK_TEST;
__asm__ volatile (
"pushl %%ebx \n"
"pushl %%ebp \n"
"pushl $(run_return_adress) \n"
"pushl %2 \n"
"jmp *%3 \n"
"run_return_adress: \n"
"popl %%ebp \n"
"popl %%ebx \n"
:"=a" (retval), "=c" (tempflags)
:"r" (tempflags),"r" (code)
:"%edx","%edi","%esi","cc","memory"
);
reg_flags=(reg_flags & ~FMASK_TEST) | (tempflags & FMASK_TEST);
#else
register Bit32u tempflags=reg_flags & FMASK_TEST;
__asm__ volatile (
@ -629,15 +645,42 @@ static void gen_call_function(void * func,char * ops,...) {
if (ops) {
va_list params;
va_start(params,ops);
Bitu stack_used=0;
bool free_flags=false;
Bits pindex=0;
while (*ops) {
if (*ops=='%') {
pinfo[pindex].line=ops+1;
pinfo[pindex].value=va_arg(params,Bitu);
#if defined (MACOSX)
char * scan=pinfo[pindex].line;
if ((*scan=='I') || (*scan=='D')) stack_used+=4;
else if (*scan=='F') free_flags=true;
#endif
pindex++;
}
ops++;
}
#if defined (MACOSX)
/* align stack */
stack_used+=4; // saving esp on stack as well
cache_addw(0xc48b); // mov eax,esp
cache_addb(0x2d); // sub eax,stack_used
cache_addd(stack_used);
cache_addw(0xe083); // and eax,0xfffffff0
cache_addb(0xf0);
cache_addb(0x05); // sub eax,stack_used
cache_addd(stack_used);
cache_addb(0x94); // xchg eax,esp
if (free_flags) {
cache_addw(0xc083); // add eax,4
cache_addb(0x04);
}
cache_addb(0x50); // push eax (==old esp)
#endif
paramcount=0;
while (pindex) {
pindex--;
@ -694,7 +737,24 @@ static void gen_call_function(void * func,char * ops,...) {
IllegalOption("gen_call_function unknown param");
}
}
#if defined (MACOSX)
if (free_flags) release_flags=false;
} else {
/* align stack */
Bit32u stack_used=8; // saving esp and return address on the stack
cache_addw(0xc48b); // mov eax,esp
cache_addb(0x2d); // sub eax,stack_used
cache_addd(stack_used);
cache_addw(0xe083); // and eax,0xfffffff0
cache_addb(0xf0);
cache_addb(0x05); // sub eax,stack_used
cache_addd(stack_used);
cache_addb(0x94); // xchg eax,esp
cache_addb(0x50); // push esp (==old esp)
#endif
}
/* Clear some unprotected registers */
x86gen.regs[X86_REG_ECX]->Clear();
x86gen.regs[X86_REG_EDX]->Clear();
@ -733,6 +793,11 @@ static void gen_call_function(void * func,char * ops,...) {
}
/* Restore EAX registers to be used again */
x86gen.regs[X86_REG_EAX]->notusable=false;
#if defined (MACOSX)
/* restore stack */
cache_addb(0x5c); // pop esp
#endif
}
static void gen_call_write(DynReg * dr,Bit32u val,Bitu write_size) {
@ -741,6 +806,21 @@ static void gen_call_write(DynReg * dr,Bit32u val,Bitu write_size) {
x86gen.regs[X86_REG_EAX]->notusable=true;
gen_protectflags();
#if defined (MACOSX)
/* align stack */
Bitu stack_used=12;
cache_addw(0xc48b); // mov eax,esp
cache_addb(0x2d); // sub eax,stack_used
cache_addd(stack_used);
cache_addw(0xe083); // and eax,0xfffffff0
cache_addb(0xf0);
cache_addb(0x05); // sub eax,stack_used
cache_addd(stack_used);
cache_addb(0x94); // xchg eax,esp
cache_addb(0x50); // push eax (==old esp)
#endif
cache_addb(0x68); //PUSH val
cache_addd(val);
GenReg * genreg=FindDynReg(dr);
@ -762,6 +842,11 @@ static void gen_call_write(DynReg * dr,Bit32u val,Bitu write_size) {
cache_addb(2*4);
x86gen.regs[X86_REG_EAX]->notusable=false;
gen_releasereg(dr);
#if defined (MACOSX)
/* restore stack */
cache_addb(0x5c); // pop esp
#endif
}
static Bit8u * gen_create_branch(BranchTypes type) {
@ -774,7 +859,7 @@ static void gen_fill_branch(Bit8u * data,Bit8u * from=cache.pos) {
#if C_DEBUG
Bits len=from-data;
if (len<0) len=-len;
if (len>126) LOG_MSG("BIg jump %d",len);
if (len>126) LOG_MSG("Big jump %d",len);
#endif
*data=(from-data-1);
}

View file

@ -16,7 +16,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* $Id: cpu.cpp,v 1.85 2006-09-17 13:38:30 c2woody Exp $ */
/* $Id: cpu.cpp,v 1.86 2006-10-04 19:24:52 c2woody Exp $ */
#include <assert.h>
#include "dosbox.h"
@ -55,6 +55,7 @@ void CPU_Core_Normal_Init(void);
void CPU_Core_Simple_Init(void);
void CPU_Core_Dyn_X86_Init(void);
void CPU_Core_Dyn_X86_Cache_Init(bool enable_cache);
void CPU_Core_Dyn_X86_Cache_Close(void);
/* In debug mode exceptions are tested and dosbox exits when
* a unhandled exception state is detected.
@ -2084,6 +2085,9 @@ public:
static CPU * test;
/* Section shutdown callback for the CPU module.
 * When the dynamic x86 core is compiled in, close its code cache first
 * (new in this commit), then destroy the module-level CPU instance.
 * The 'sec' parameter is the config section being shut down; it is not
 * used here — the callback signature is dictated by the section system. */
void CPU_ShutDown(Section* sec) {
#if (C_DYNAMIC_X86)
CPU_Core_Dyn_X86_Cache_Close();
#endif
delete test;
}