diff --git a/KernelLand/Kernel/arch/x86_64/mm_virt.c b/KernelLand/Kernel/arch/x86_64/mm_virt.c
index 14117f48..23f04ac2 100644
--- a/KernelLand/Kernel/arch/x86_64/mm_virt.c
+++ b/KernelLand/Kernel/arch/x86_64/mm_virt.c
@@ -53,6 +53,17 @@
 #define INVLPG_ALL()	__asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;")
 #define INVLPG_GLOBAL()	__asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4")
 
+// TODO: INVLPG_ALL is expensive
+#define GET_TEMP_MAPPING(cr3) do { \
+	__ASM__("cli"); \
+	__AtomicTestSetLoop( (Uint *)&TMPCR3(), (cr3) | 3 ); \
+	INVLPG_ALL(); \
+} while(0)
+#define REL_TEMP_MAPPING() do { \
+	TMPCR3() = 0; \
+	__ASM__("sti"); \
+} while(0)
+
 // === CONSTS ===
 //tPAddr	* const	gaPageTable = MM_FRACTAL_BASE;
 
@@ -70,7 +81,8 @@ void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable
 void	MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
 //void	MM_DumpTables(tVAddr Start, tVAddr End);
  int	MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
- int	MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
+tPAddr	MM_GetPageFromAS(tProcess *Process, volatile const void *Addr);
+ int	MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
 // int	MM_Map(tVAddr VAddr, tPAddr PAddr);
 void	MM_Unmap(tVAddr VAddr);
 void	MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
@@ -78,7 +90,7 @@ void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
  int	MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
 
 // === GLOBALS ===
-tMutex	glMM_TempFractalLock;
+tShortSpinlock	glMM_ZeroPage;
 tPAddr	gMM_ZeroPage;
 
 // === CODE ===
@@ -259,7 +271,7 @@
 	// Print Stack Backtrace
 	Error_Backtrace(Regs->RIP, Regs->RBP);
 	
-	MM_DumpTables(0, -1);
+	//MM_DumpTables(0, -1);
 	
 	return 1;
 }
@@ -300,8 +312,6 @@ void MM_DumpTables(tVAddr Start, tVAddr End)
 	const tPAddr	MASK = ~CHANGEABLE_BITS;	// Physical address and access bits
 	tVAddr	rangeStart = 0;
 	tPAddr	expected = CHANGEABLE_BITS;	// CHANGEABLE_BITS is used because it's not a valid value
-	tVAddr	curPos;
-	Uint	page;
 	tPAddr	expected_pml4 = PF_WRITE|PF_USER;
 	tPAddr	expected_pdp = PF_WRITE|PF_USER;
 	tPAddr	expected_pd = PF_WRITE|PF_USER;
@@ -310,11 +320,12 @@
 	
 	End &= (1L << 48) - 1;
 	
-	Start >>= 12;	End >>= 12;
+	Start >>= 12;
+	End >>= 12;
 	
-	for(page = Start, curPos = Start<<12;
-		page < End;
-		curPos += 0x1000, page++)
+	// `page` will not overflow, End is 48-12 bits
+	tVAddr	curPos = Start << 12;
+	for(Uint page = Start; page <= End; curPos += 0x1000, page++)
 	{
 		//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
 		//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
@@ -340,8 +351,8 @@
 			expected |= expected_pml4 & PF_NX;
 			expected |= expected_pdp & PF_NX;
 			expected |= expected_pd & PF_NX;
-			Log("expected (pml4 = %x, pdp = %x, pd = %x)",
-				expected_pml4, expected_pdp, expected_pd);
+//			Log("expected (pml4 = %x, pdp = %x, pd = %x)",
+//				expected_pml4, expected_pdp, expected_pd);
 			// Dump
 			MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
 			expected = CHANGEABLE_BITS;
@@ -470,7 +481,7 @@
  * \param bTemp	Use temporary mappings
  * \param bLarge	Treat as a large page
  */
-int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
+int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
 {
 	tPAddr	*ent;
 	 int	rv;
@@ -478,16 +489,15 @@
 	ENTER("pVAddr PPAddr", VAddr, PAddr);
 	
 	// Get page pointer (Allow allocating)
-	rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
+	rv = MM_GetPageEntryPtr( (tVAddr)VAddr, bTemp, 1, bLarge, &ent);
 	if(rv < 0)	LEAVE_RET('i', 0);
 	
 	if( *ent & 1 )	LEAVE_RET('i', 0);
 	
 	*ent = PAddr | 3;
 	
-	if( VAddr < 0x800000000000 )
+	if( (tVAddr)VAddr <= USER_MAX )
 		*ent |= PF_USER;
-	INVLPG( VAddr );
 	
 	LEAVE('i', 1);
@@ -499,7 +509,7 @@
  * \param VAddr	Target virtual address
  * \param PAddr	Physical address of page
  */
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
 {
 	return MM_MapEx(VAddr, PAddr, 0, 0);
 }
@@ -516,21 +526,22 @@
 	// Check Page Dir
 	if( !(PAGEDIR(VAddr >> 21) & 1) )	return ;
 	
-	PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
+	tPAddr	*ent = &PAGETABLE(VAddr >> PTAB_SHIFT);
+	*ent = 0;
 	INVLPG( VAddr );
 }
 
 /**
  * \brief Allocate a block of memory at the specified virtual address
  */
-tPAddr MM_Allocate(tVAddr VAddr)
+tPAddr MM_Allocate(volatile void *VAddr)
 {
 	tPAddr	ret;
 	
-	ENTER("xVAddr", VAddr);
+	ENTER("pVAddr", VAddr);
 	
 	// Ensure the tables are allocated before the page (keeps things neat)
-	MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+	MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 1, 0, NULL );
 	
 	// Allocate the page
 	ret = MM_AllocPhys();
@@ -549,37 +560,39 @@
 	return ret;
 }
 
-tPAddr MM_AllocateZero(tVAddr VAddr)
+void MM_AllocateZero(volatile void *VAddr)
 {
-	tPAddr	ret = gMM_ZeroPage;
-	
-	MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+	// Ensure dir is populated
+	MM_GetPageEntryPtr((tVAddr)VAddr, 0, 1, 0, NULL);
 	
-	if(!gMM_ZeroPage) {
-		ret = gMM_ZeroPage = MM_AllocPhys();
-		MM_RefPhys(ret);	// Don't free this please
-		MM_Map(VAddr, ret);
-		memset((void*)VAddr, 0, 0x1000);
+	if(!gMM_ZeroPage)
+	{
+		SHORTLOCK(&glMM_ZeroPage);
+		if( !gMM_ZeroPage )
+		{
+			gMM_ZeroPage = MM_AllocPhys();
+			MM_Map(VAddr, gMM_ZeroPage);
+			memset((void*)VAddr, 0, PAGE_SIZE);
+		}
+		SHORTREL(&glMM_ZeroPage);
 	}
-	else {
-		MM_Map(VAddr, ret);
+	else
+	{
+		MM_Map(VAddr, gMM_ZeroPage);
 	}
-	MM_RefPhys(ret);	// Refernce for this map
+	MM_RefPhys(gMM_ZeroPage);	// Reference for this map
 	MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
-	return ret;
 }
 
 /**
  * \brief Deallocate a page at a virtual address
  */
-void MM_Deallocate(tVAddr VAddr)
+void MM_Deallocate(volatile void *VAddr)
 {
-	tPAddr	phys;
-	
-	phys = MM_GetPhysAddr( (void*)VAddr );
+	tPAddr	phys = MM_GetPhysAddr( VAddr );
 	if(!phys)	return ;
 	
-	MM_Unmap(VAddr);
+	MM_Unmap((tVAddr)VAddr);
 	
 	MM_DerefPhys(phys);
 }
@@ -609,7 +622,7 @@ int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
 /**
  * \brief Get the physical address of a virtual location
  */
-tPAddr MM_GetPhysAddr(const void *Ptr)
+tPAddr MM_GetPhysAddr(volatile const void *Ptr)
 {
 	tVAddr	Addr = (tVAddr)Ptr;
 	tPAddr	*ptr;
@@ -623,16 +636,37 @@
 	return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
 }
 
+/**
+ * \brief Get the address of a page from another address space
+ * \return Referenced physical address (or 0 on error)
+ */
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr)
+{
+	GET_TEMP_MAPPING(Process->MemState.CR3);
+	tPAddr	ret = 0;
+	tPAddr	*ptr;
+	if(MM_GetPageEntryPtr((tVAddr)Addr, 1,0,0, &ptr) == 0)	// Temp, NoAlloc, NotLarge
+	{
+		if( *ptr & 1 )
+		{
+			ret = (*ptr & ~0xFFF) | ((tVAddr)Addr & 0xFFF);
+			MM_RefPhys( ret );
+		}
+	}
+	REL_TEMP_MAPPING();
+	return ret;
+}
+
 /**
  * \brief Sets the flags on a page
  */
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
 {
 	tPAddr	*ent;
 	 int	rv;
 	
 	// Get pointer
-	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+	rv = MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 0, 0, &ent);
 	if(rv < 0)	return ;
 	
 	// Ensure the entry is valid
@@ -666,7 +700,6 @@
 	if( Flags & MM_PFLAG_COW ) {
 		*ent &= ~PF_WRITE;
 		*ent |= PF_COW;
-		INVLPG_ALL();
 	}
 	else {
 		*ent &= ~PF_COW;
@@ -689,12 +722,12 @@
 /**
  * \brief Get the flags applied to a page
  */
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(volatile const void *VAddr)
 {
 	tPAddr	*ent;
 	 int	rv, ret = 0;
 	
-	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+	rv = MM_GetPageEntryPtr((tVAddr)VAddr, 0, 0, 0, &ent);
 	if(rv < 0)	return 0;
 	
 	if( !(*ent & 1) )	return 0;
@@ -722,43 +755,73 @@ int MM_IsValidBuffer(tVAddr Addr, size_t Size)
 	Size += Addr & (PAGE_SIZE-1);
 	Addr &= ~(PAGE_SIZE-1);
-	Addr &= ((1UL << 48)-1);	// Clap to address space
+	// Reject non-canonical addresses
+	if( ((Addr >> 47) & 1) != ((Addr>>48) == 0xFFFF) )
+		return 0;
+	Addr &= ((1UL << 48)-1);	// Clamp to address space
 	
 	pml4 = Addr >> 39;
 	pdp = Addr >> 30;
 	dir = Addr >> 21;
 	tab = Addr >> 12;
 	
-	if( !(PAGEMAPLVL4(pml4) & 1) )	return 0;
-	if( !(PAGEDIRPTR(pdp) & 1) )	return 0;
-	if( !(PAGEDIR(dir) & 1) )	return 0;
-	if( !(PAGETABLE(tab) & 1) )	return 0;
+	if( !(PAGEMAPLVL4(pml4) & 1) ) {
+		Log_Debug("MMVirt", "PML4E %i NP", pml4);
+		return 0;
+	}
+	if( !(PAGEDIRPTR(pdp) & 1) ) {
+		Log_Debug("MMVirt", "PDPE %i NP", pdp);
+		return 0;
+	}
+	if( !(PAGEDIR(dir) & 1) ) {
+		Log_Debug("MMVirt", "PDE %i NP", dir);
+		return 0;
+	}
+	if( !(PAGETABLE(tab) & 1) ) {
+		Log_Debug("MMVirt", "PTE %i NP", tab);
+		return 0;
+	}
 	
 	bIsUser = !!(PAGETABLE(tab) & PF_USER);
 
 	while( Size >= PAGE_SIZE )
 	{
+		tab ++;
+		Size -= PAGE_SIZE;
+		
 		if( (tab & 511) == 0 )
 		{
 			dir ++;
-			if( ((dir >> 9) & 511) == 0 )
+			if( (dir & 511) == 0 )
 			{
 				pdp ++;
-				if( ((pdp >> 18) & 511) == 0 )
+				if( (pdp & 511) == 0 )
 				{
 					pml4 ++;
-					if( !(PAGEMAPLVL4(pml4) & 1) )	return 0;
+					if( !(PAGEMAPLVL4(pml4) & 1) ) {
+						Log_Debug("MMVirt", "IsValidBuffer - PML4E %x NP, Size=%x", pml4, Size);
+						return 0;
+					}
 				}
-				if( !(PAGEDIRPTR(pdp) & 1) )	return 0;
+				if( !(PAGEDIRPTR(pdp) & 1) ) {
+					Log_Debug("MMVirt", "IsValidBuffer - PDPE %x NP", pdp);
+					return 0;
+				}
+			}
+			if( !(PAGEDIR(dir) & 1) ) {
+				Log_Debug("MMVirt", "IsValidBuffer - PDE %x NP", dir);
+				return 0;
 			}
-			if( !(PAGEDIR(dir) & 1) )	return 0;
 		}
-		if( !(PAGETABLE(tab) & 1) )	return 0;
-		if( bIsUser && !(PAGETABLE(tab) & PF_USER) )	return 0;
-		
-		tab ++;
-		Size -= PAGE_SIZE;
+		if( !(PAGETABLE(tab) & 1) ) {
+			Log_Debug("MMVirt", "IsValidBuffer - PTE %x NP", tab);
+			return 0;
+		}
+		if( bIsUser && !(PAGETABLE(tab) & PF_USER) ) {
+			Log_Debug("MMVirt", "IsValidBuffer - PTE %x Not user", tab);
+			return 0;
+		}
 	}
 	return 1;
 }
@@ -767,28 +830,27 @@
 /**
  * \brief Map a range of hardware pages
  */
-tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
+void *MM_MapHWPages(tPAddr PAddr, Uint Number)
 {
-	tVAddr	ret;
-	 int	num;
-	
 	//TODO: Add speedups (memory of first possible free)
-	for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
+	for( tPage *ret = (void*)MM_HWMAP_BASE; ret < (tPage*)MM_HWMAP_TOP; ret ++ )
 	{
-		for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
+		// Check if this region has already been used
+		 int	num;
+		for( num = Number; num -- && ret < (tPage*)MM_HWMAP_TOP; ret ++ )
 		{
-			if( MM_GetPhysAddr( (void*)ret ) != 0 )
+			if( MM_GetPhysAddr( ret ) != 0 )
 				break;
 		}
 		if( num >= 0 )	continue;
 		
//		Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
+		// Map backwards (because `ret` is at the top of the region atm)
 		PAddr += 0x1000 * Number;
-		
 		while( Number -- ) {
-			ret -= 0x1000;
+			ret --;
 			PAddr -= 0x1000;
 			MM_Map(ret, PAddr);
 			MM_RefPhys(PAddr);
@@ -804,14 +866,15 @@
 /**
  * \brief Free a range of hardware pages
  */
-void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(volatile void *VAddr, Uint Number)
 {
//	Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
+	tPage	*page = (void*)VAddr;
 	while( Number -- )
 	{
-		MM_DerefPhys( MM_GetPhysAddr((void*)VAddr) );
-		MM_Unmap(VAddr);
-		VAddr += 0x1000;
+		MM_DerefPhys( MM_GetPhysAddr(page) );
+		MM_Unmap((tVAddr)page);
+		page ++;
 	}
 }
@@ -824,21 +887,22 @@
  * \param PhysAddr	Pointer to the location to place the physical address allocated
  * \return Virtual address allocated
  */
-tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
+void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 {
 	tPAddr	phys;
-	tVAddr	ret;
+	void	*ret;
 	
 	// Sanity Check
-	if(MaxBits < 12 || !PhysAddr)	return 0;
+	ASSERTCR(MaxBits, >=, 12, NULL);
 	
 	// Fast Allocate
 	if(Pages == 1 && MaxBits >= PHYS_BITS)
 	{
 		phys = MM_AllocPhys();
-		*PhysAddr = phys;
 		ret = MM_MapHWPages(phys, 1);
 		MM_DerefPhys(phys);
+		if(PhysAddr)
+			*PhysAddr = phys;
 		return ret;
 	}
@@ -849,6 +913,8 @@
 	// Allocated successfully, now map
 	ret = MM_MapHWPages(phys, Pages);
+	if(PhysAddr)
+		*PhysAddr = phys;
 	// MapHWPages references the pages, so deref them back down to 1
 	for(;Pages--;phys+=0x1000)
 		MM_DerefPhys(phys);
@@ -857,7 +923,6 @@
 		return 0;
 	}
 	
-	*PhysAddr = phys;
 	return ret;
 }
@@ -866,9 +931,8 @@ void *MM_MapTemp(tPAddr PAddr)
 {
 	const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
 	tVAddr	ret = MM_TMPMAP_BASE;
-	 int	i;
 	
-	for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
+	for( int i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
 	{
 		tPAddr	*ent;
 		if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
@@ -886,31 +950,36 @@
 	return 0;
 }
 
+void *MM_MapTempFromProc(tProcess *Process, const void *VAddr)
+{
+	// Get paddr
+	tPAddr	paddr = MM_GetPageFromAS(Process, VAddr);
+	if( paddr == 0 )
+		return NULL;
+	return MM_MapTemp(paddr);
+}
+
 void MM_FreeTemp(void *Ptr)
 {
-	MM_Deallocate((tVAddr)Ptr);
-	return ;
+	MM_Deallocate(Ptr);
 }
 
 // --- Address Space Clone --
-tPAddr MM_Clone(void)
+tPAddr MM_Clone(int bNoUserCopy)
 {
 	tPAddr	ret;
 	 int	i;
-	tVAddr	kstackbase;
 	
 	// #1 Create a copy of the PML4
 	ret = MM_AllocPhys();
 	if(!ret)	return 0;
 	
 	// #2 Alter the fractal pointer
-	Mutex_Acquire(&glMM_TempFractalLock);
-	TMPCR3() = ret | 3;
-	INVLPG_ALL();
+	GET_TEMP_MAPPING(ret);
 	
 	// #3 Set Copy-On-Write to all user pages
-	if( Threads_GetPID() != 0 )
+	if( Threads_GetPID() != 0 && !bNoUserCopy )
 	{
 		for( i = 0; i < 256; i ++)
 		{
@@ -961,19 +1030,20 @@
 	// #6 Create kernel stack
 	//  tThread->KernelStack is the top
 	//  There is 1 guard page below the stack
-	kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+	tPage	*kstackbase = (void*)( Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE );
 	
 	// Clone stack
 	TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
-	for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
+	for( i = 1; i < KERNEL_STACK_SIZE/PAGE_SIZE; i ++ )
 	{
 		tPAddr	phys = MM_AllocPhys();
 		void	*tmpmapping;
-		MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+		MM_MapEx(kstackbase + i, phys, 1, 0);
 		
 		tmpmapping = MM_MapTemp(phys);
-		if( MM_GetPhysAddr( (void*)(kstackbase+i*0x1000) ) )
-			memcpy(tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+		// If the current thread's stack is shorter than the new one, zero
+		if( MM_GetPhysAddr( kstackbase + i ) )
+			memcpy(tmpmapping, kstackbase + i, 0x1000);
 		else
 			memset(tmpmapping, 0, 0x1000);
//		if( i == 0xF )
@@ -984,9 +1054,7 @@
//	MAGIC_BREAK();
 	// #7 Return
-	TMPCR3() = 0;
-	INVLPG_ALL();
-	Mutex_Release(&glMM_TempFractalLock);
+	REL_TEMP_MAPPING();
//	Log("MM_Clone: RETURN %P", ret);
 	return ret;
 }
@@ -1020,7 +1088,7 @@ void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts)
 
 void MM_ClearUser(void)
 {
-	MM_int_ClearTableLevel(0, 39, 256);
+	MM_int_ClearTableLevel(0, 39, 256);
 }
 
 tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
@@ -1030,9 +1098,7 @@
 	 int	i;
 	
 	// #1 Set temp fractal to PID0
-	Mutex_Acquire(&glMM_TempFractalLock);
-	TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
-	INVLPG_ALL();
+	GET_TEMP_MAPPING( ((tPAddr)gInitialPML4 - KERNEL_BASE) );
 	
 	// #2 Scan for a free stack address < 2^47
 	for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
@@ -1042,7 +1108,7 @@
 		if( !(*ptr & 1) )	break;
 	}
 	if( ret >= (1ULL << 47) ) {
-		Mutex_Release(&glMM_TempFractalLock);
+		REL_TEMP_MAPPING();
 		return 0;
 	}
@@ -1057,8 +1123,9 @@
 			Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
 			return 0;
 		}
-		MM_MapEx(ret + i*0x1000, phys, 1, 0);
-		MM_SetFlags(ret + i*0x1000, MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
+		MM_MapEx( (void*)(ret + i*0x1000), phys, 1, 0);
+		// XXX: ... this doesn't change the correct address space
+		MM_SetFlags( (void*)(ret + i*0x1000), MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
 	}
 	
 	// Copy data
@@ -1070,13 +1137,10 @@
 		tmp_addr = MM_MapTemp(phys);
 		dest = (char*)tmp_addr + (0x1000 - StackSize);
 		memcpy( dest, StackData, StackSize );
-		Log_Debug("MM", "MM_NewWorkerStack: %p->%p %i bytes (i=%i)", StackData, dest, StackSize, i);
-		Log_Debug("MM", "MM_NewWorkerStack: ret = %p", ret);
 		MM_FreeTemp(tmp_addr);
 	}
 	
-	TMPCR3() = 0;
-	Mutex_Release(&glMM_TempFractalLock);
+	REL_TEMP_MAPPING();
 	
 	return ret + i*0x1000;
 }
@@ -1096,11 +1160,11 @@
 		//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
 		for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
 		{
-			if( !MM_Allocate(base+i) )
+			if( !MM_Allocate( (void*)(base+i) ) )
 			{
 				Log_Warning("MM", "MM_NewKStack - Allocation failed");
 				for( i -= 0x1000; i; i -= 0x1000)
-					MM_Deallocate(base+i);
+					MM_Deallocate((void*)(base+i));
 				return 0;
 			}
 		}
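
Note on the new temp-mapping macros: GET_TEMP_MAPPING()/REL_TEMP_MAPPING() replace the old glMM_TempFractalLock mutex by spin-claiming the temporary fractal slot (TMPCR3()) with interrupts disabled and then flushing the TLB, so the critical section must not sleep or take a blocking lock. A minimal usage sketch, modelled directly on the MM_GetPageFromAS() added in this patch; the helper name PeekOtherAS is illustrative only and not part of the patch:

	// Sketch (assumes the Acess2 kernel environment from this file):
	// translate `Addr` in another process's address space into a
	// referenced physical address, via the temporary fractal mapping.
	tPAddr PeekOtherAS(tProcess *Process, volatile const void *Addr)
	{
		tPAddr	ret = 0;
		tPAddr	*pte;
		
		GET_TEMP_MAPPING(Process->MemState.CR3);	// cli + atomic claim of TMPCR3() + TLB flush
		// Walk the temporary fractal (bTemp=1), without allocating tables
		if( MM_GetPageEntryPtr((tVAddr)Addr, 1, 0, 0, &pte) == 0 && (*pte & 1) )
		{
			ret = (*pte & ~0xFFFUL) | ((tVAddr)Addr & 0xFFF);
			MM_RefPhys(ret);	// hold a reference for the caller
		}
		REL_TEMP_MAPPING();	// clear TMPCR3() + sti
		return ret;
	}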