#define INVLPG_ALL() __asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;")
#define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4")
+// TODO: INVLPG_ALL is expensive
+#define GET_TEMP_MAPPING(cr3) do { \
+ __ASM__("cli"); \
+ __AtomicTestSetLoop( (Uint *)&TMPCR3(), (cr3) | 3 ); \
+ INVLPG_ALL(); \
+} while(0)
+#define REL_TEMP_MAPPING() do { \
+ TMPCR3() = 0; \
+ __ASM__("sti"); \
+} while(0)
+
// === CONSTS ===
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
//void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
- int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr);
+ int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// === GLOBALS ===
-tMutex glMM_TempFractalLock;
+tShortSpinlock glMM_ZeroPage;
tPAddr gMM_ZeroPage;
// === CODE ===
ASSERT(paddr != curpage);
- tmp = (void*)MM_MapTemp(paddr);
+ tmp = MM_MapTemp(paddr);
memcpy( tmp, NextLevel, 0x1000 );
- MM_FreeTemp( (tVAddr)tmp );
+ MM_FreeTemp( tmp );
#if TRACE_COW
Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
// Print Stack Backtrace
Error_Backtrace(Regs->RIP, Regs->RBP);
- MM_DumpTables(0, -1);
+ //MM_DumpTables(0, -1);
return 1;
}
#define CANOICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
LogF("%016llx => ", CANOICAL(RangeStart));
// LogF("%6llx %6llx %6llx %016llx => ",
-// MM_GetPhysAddr( (tVAddr)&PAGEDIRPTR(RangeStart>>30) ),
-// MM_GetPhysAddr( (tVAddr)&PAGEDIR(RangeStart>>21) ),
-// MM_GetPhysAddr( (tVAddr)&PAGETABLE(RangeStart>>12) ),
+// MM_GetPhysAddr( &PAGEDIRPTR(RangeStart>>30) ),
+// MM_GetPhysAddr( &PAGEDIR(RangeStart>>21) ),
+// MM_GetPhysAddr( &PAGETABLE(RangeStart>>12) ),
// CANOICAL(RangeStart)
// );
if( gMM_ZeroPage && (PAGETABLE(RangeStart>>12) & PADDR_MASK) == gMM_ZeroPage )
const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
tVAddr rangeStart = 0;
tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a vaild value
- tVAddr curPos;
- Uint page;
tPAddr expected_pml4 = PF_WRITE|PF_USER;
tPAddr expected_pdp = PF_WRITE|PF_USER;
tPAddr expected_pd = PF_WRITE|PF_USER;
End &= (1L << 48) - 1;
- Start >>= 12; End >>= 12;
+ Start >>= 12;
+ End >>= 12;
- for(page = Start, curPos = Start<<12;
- page < End;
- curPos += 0x1000, page++)
+	// `page` cannot overflow: End is at most 36 bits (48-bit VA minus 12-bit page offset)
+ tVAddr curPos = Start << 12;
+ for(Uint page = Start; page <= End; curPos += 0x1000, page++)
{
//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
expected |= expected_pml4 & PF_NX;
expected |= expected_pdp & PF_NX;
expected |= expected_pd & PF_NX;
- Log("expected (pml4 = %x, pdp = %x, pd = %x)",
- expected_pml4, expected_pdp, expected_pd);
+// Log("expected (pml4 = %x, pdp = %x, pd = %x)",
+// expected_pml4, expected_pdp, expected_pd);
// Dump
MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
expected = CHANGEABLE_BITS;
 * \param bTemp	Use temporary mappings
* \param bLarge Treat as a large page
*/
-int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
+int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
{
tPAddr *ent;
int rv;
ENTER("pVAddr PPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
- rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
if( *ent & 1 ) LEAVE_RET('i', 0);
*ent = PAddr | 3;
- if( VAddr < 0x800000000000 )
+ if( (tVAddr)VAddr <= USER_MAX )
*ent |= PF_USER;
-
INVLPG( VAddr );
LEAVE('i', 1);
* \param VAddr Target virtual address
* \param PAddr Physical address of page
*/
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
{
return MM_MapEx(VAddr, PAddr, 0, 0);
}
// Check Page Dir
if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
- PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
+ tPAddr *ent = &PAGETABLE(VAddr >> PTAB_SHIFT);
+ *ent = 0;
INVLPG( VAddr );
}
/**
* \brief Allocate a block of memory at the specified virtual address
*/
-tPAddr MM_Allocate(tVAddr VAddr)
+tPAddr MM_Allocate(volatile void *VAddr)
{
tPAddr ret;
- ENTER("xVAddr", VAddr);
+ ENTER("pVAddr", VAddr);
// Ensure the tables are allocated before the page (keeps things neat)
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+ MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 1, 0, NULL );
// Allocate the page
ret = MM_AllocPhys();
return ret;
}
-tPAddr MM_AllocateZero(tVAddr VAddr)
+void MM_AllocateZero(volatile void *VAddr)
{
- tPAddr ret = gMM_ZeroPage;
-
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+ // Ensure dir is populated
+ MM_GetPageEntryPtr((tVAddr)VAddr, 0, 1, 0, NULL);
- if(!gMM_ZeroPage) {
- ret = gMM_ZeroPage = MM_AllocPhys();
- MM_RefPhys(ret); // Don't free this please
- MM_Map(VAddr, ret);
- memset((void*)VAddr, 0, 0x1000);
+ if(!gMM_ZeroPage)
+ {
+ SHORTLOCK(&glMM_ZeroPage);
+ if( !gMM_ZeroPage )
+ {
+ gMM_ZeroPage = MM_AllocPhys();
+ MM_Map(VAddr, gMM_ZeroPage);
+ memset((void*)VAddr, 0, PAGE_SIZE);
+ }
+ SHORTREL(&glMM_ZeroPage);
}
- else {
- MM_Map(VAddr, ret);
+ else
+ {
+ MM_Map(VAddr, gMM_ZeroPage);
}
- MM_RefPhys(ret); // Refernce for this map
+	MM_RefPhys(gMM_ZeroPage);	// Reference for this map
MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
- return ret;
}
/**
* \brief Deallocate a page at a virtual address
*/
-void MM_Deallocate(tVAddr VAddr)
+void MM_Deallocate(volatile void *VAddr)
{
- tPAddr phys;
-
- phys = MM_GetPhysAddr(VAddr);
+ tPAddr phys = MM_GetPhysAddr( VAddr );
if(!phys) return ;
- MM_Unmap(VAddr);
+ MM_Unmap((tVAddr)VAddr);
MM_DerefPhys(phys);
}
/**
* \brief Get the physical address of a virtual location
*/
-tPAddr MM_GetPhysAddr(tVAddr Addr)
+tPAddr MM_GetPhysAddr(volatile const void *Ptr)
{
+ tVAddr Addr = (tVAddr)Ptr;
tPAddr *ptr;
int ret;
return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
}
+/**
+ * \brief Get the address of a page from another address space
+ * \return Referenced physical address (or 0 on error)
+ */
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr)
+{
+ GET_TEMP_MAPPING(Process->MemState.CR3);
+ tPAddr ret = 0;
+ tPAddr *ptr;
+ if(MM_GetPageEntryPtr((tVAddr)Addr, 1,0,0, &ptr) == 0) // Temp, NoAlloc, NotLarge
+ {
+ if( *ptr & 1 )
+ {
+ ret = (*ptr & ~0xFFF) | ((tVAddr)Addr & 0xFFF);
+ MM_RefPhys( ret );
+ }
+ }
+ REL_TEMP_MAPPING();
+ return ret;
+}
+
/**
* \brief Sets the flags on a page
*/
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
{
tPAddr *ent;
int rv;
// Get pointer
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return ;
// Ensure the entry is valid
if( Flags & MM_PFLAG_COW ) {
*ent &= ~PF_WRITE;
*ent |= PF_COW;
- INVLPG_ALL();
}
else {
*ent &= ~PF_COW;
/**
* \brief Get the flags applied to a page
*/
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(volatile const void *VAddr)
{
tPAddr *ent;
int rv, ret = 0;
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr((tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return 0;
if( !(*ent & 1) ) return 0;
Size += Addr & (PAGE_SIZE-1);
Addr &= ~(PAGE_SIZE-1);
- Addr &= ((1UL << 48)-1); // Clap to address space
+	// Reject non-canonical addresses (bit 47 must be sign-extended into bits 48-63)
+ if( ((Addr >> 47) & 1) != ((Addr>>48) == 0xFFFF))
+ return 0;
+ Addr &= ((1UL << 48)-1); // Clamp to address space
pml4 = Addr >> 39;
pdp = Addr >> 30;
dir = Addr >> 21;
tab = Addr >> 12;
- if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
- if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
- if( !(PAGEDIR(dir) & 1) ) return 0;
- if( !(PAGETABLE(tab) & 1) ) return 0;
+ if( !(PAGEMAPLVL4(pml4) & 1) ) {
+ Log_Debug("MMVirt", "PML4E %i NP", pml4);
+ return 0;
+ }
+ if( !(PAGEDIRPTR(pdp) & 1) ) {
+ Log_Debug("MMVirt", "PDPE %i NP", pdp);
+ return 0;
+ }
+ if( !(PAGEDIR(dir) & 1) ) {
+ Log_Debug("MMVirt", "PDE %i NP", dir);
+ return 0;
+ }
+ if( !(PAGETABLE(tab) & 1) ) {
+ Log_Debug("MMVirt", "PTE %i NP", tab);
+ return 0;
+ }
bIsUser = !!(PAGETABLE(tab) & PF_USER);
while( Size >= PAGE_SIZE )
{
+ tab ++;
+ Size -= PAGE_SIZE;
+
if( (tab & 511) == 0 )
{
dir ++;
- if( ((dir >> 9) & 511) == 0 )
+ if( (dir & 511) == 0 )
{
pdp ++;
- if( ((pdp >> 18) & 511) == 0 )
+ if( (pdp & 511) == 0 )
{
pml4 ++;
- if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
+ if( !(PAGEMAPLVL4(pml4) & 1) ) {
+ Log_Debug("MMVirt", "IsValidBuffer - PML4E %x NP, Size=%x", pml4, Size);
+ return 0;
+ }
+ }
+ if( !(PAGEDIRPTR(pdp) & 1) ) {
+ Log_Debug("MMVirt", "IsValidBuffer - PDPE %x NP", pdp);
+ return 0;
}
- if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
}
- if( !(PAGEDIR(dir) & 1) ) return 0;
+ if( !(PAGEDIR(dir) & 1) ) {
+ Log_Debug("MMVirt", "IsValidBuffer - PDE %x NP", dir);
+ return 0;
+ }
}
- if( !(PAGETABLE(tab) & 1) ) return 0;
- if( bIsUser && !(PAGETABLE(tab) & PF_USER) ) return 0;
-
- tab ++;
- Size -= PAGE_SIZE;
+ if( !(PAGETABLE(tab) & 1) ) {
+ Log_Debug("MMVirt", "IsValidBuffer - PTE %x NP", tab);
+ return 0;
+ }
+ if( bIsUser && !(PAGETABLE(tab) & PF_USER) ) {
+ Log_Debug("MMVirt", "IsValidBuffer - PTE %x Not user", tab);
+ return 0;
+ }
}
return 1;
}
/**
* \brief Map a range of hardware pages
*/
-tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
+void *MM_MapHWPages(tPAddr PAddr, Uint Number)
{
- tVAddr ret;
- int num;
-
//TODO: Add speedups (memory of first possible free)
- for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
+ for( tPage *ret = (void*)MM_HWMAP_BASE; ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
+ // Check if this region has already been used
+ int num;
+ for( num = Number; num -- && ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- if( MM_GetPhysAddr(ret) != 0 ) break;
+ if( MM_GetPhysAddr( ret ) != 0 )
+ break;
}
if( num >= 0 ) continue;
// Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
+ // Map backwards (because `ret` is at the top of the region atm)
PAddr += 0x1000 * Number;
-
while( Number -- )
{
- ret -= 0x1000;
+ ret --;
PAddr -= 0x1000;
MM_Map(ret, PAddr);
MM_RefPhys(PAddr);
/**
* \brief Free a range of hardware pages
*/
-void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(volatile void *VAddr, Uint Number)
{
// Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
+ tPage *page = (void*)VAddr;
while( Number -- )
{
- MM_DerefPhys( MM_GetPhysAddr(VAddr) );
- MM_Unmap(VAddr);
- VAddr += 0x1000;
+ MM_DerefPhys( MM_GetPhysAddr(page) );
+ MM_Unmap((tVAddr)page);
+ page ++;
}
}
* \param PhysAddr Pointer to the location to place the physical address allocated
* \return Virtual address allocate
*/
-tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
+void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
{
tPAddr phys;
- tVAddr ret;
+ void *ret;
// Sanity Check
- if(MaxBits < 12 || !PhysAddr) return 0;
+ ASSERTCR(MaxBits, >=, 12, NULL);
// Fast Allocate
if(Pages == 1 && MaxBits >= PHYS_BITS)
{
phys = MM_AllocPhys();
- *PhysAddr = phys;
ret = MM_MapHWPages(phys, 1);
MM_DerefPhys(phys);
+ if(PhysAddr)
+ *PhysAddr = phys;
return ret;
}
// Allocated successfully, now map
ret = MM_MapHWPages(phys, Pages);
+ if(PhysAddr)
+ *PhysAddr = phys;
// MapHWPages references the pages, so deref them back down to 1
for(;Pages--;phys+=0x1000)
MM_DerefPhys(phys);
return 0;
}
- *PhysAddr = phys;
return ret;
}
// --- Tempory Mappings ---
-tVAddr MM_MapTemp(tPAddr PAddr)
+void *MM_MapTemp(tPAddr PAddr)
{
const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
tVAddr ret = MM_TMPMAP_BASE;
- int i;
- for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
+ for( int i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
{
tPAddr *ent;
if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
*ent = PAddr | 3;
MM_RefPhys(PAddr);
INVLPG(ret);
- return ret;
+ return (void*)ret;
}
return 0;
}
-void MM_FreeTemp(tVAddr VAddr)
+void *MM_MapTempFromProc(tProcess *Process, const void *VAddr)
+{
+ // Get paddr
+ tPAddr paddr = MM_GetPageFromAS(Process, VAddr);
+ if( paddr == 0 )
+ return NULL;
+ return MM_MapTemp(paddr);
+}
+
+void MM_FreeTemp(void *Ptr)
{
- MM_Deallocate(VAddr);
- return ;
+ MM_Deallocate(Ptr);
}
// --- Address Space Clone --
-tPAddr MM_Clone(void)
+tPAddr MM_Clone(int bNoUserCopy)
{
tPAddr ret;
int i;
- tVAddr kstackbase;
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
if(!ret) return 0;
// #2 Alter the fractal pointer
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ret | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING(ret);
// #3 Set Copy-On-Write to all user pages
- if( Threads_GetPID() != 0 )
+ if( Threads_GetPID() != 0 && !bNoUserCopy )
{
for( i = 0; i < 256; i ++)
{
// #6 Create kernel stack
// tThread->KernelStack is the top
// There is 1 guard page below the stack
- kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+ tPage *kstackbase = (void*)( Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE );
// Clone stack
TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
- for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
+ for( i = 1; i < KERNEL_STACK_SIZE/PAGE_SIZE; i ++ )
{
tPAddr phys = MM_AllocPhys();
- tVAddr tmpmapping;
- MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+ void *tmpmapping;
+ MM_MapEx(kstackbase + i, phys, 1, 0);
tmpmapping = MM_MapTemp(phys);
- if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
- memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+ // If the current thread's stack is shorter than the new one, zero
+ if( MM_GetPhysAddr( kstackbase + i ) )
+ memcpy(tmpmapping, kstackbase + i, 0x1000);
else
- memset((void*)tmpmapping, 0, 0x1000);
+ memset(tmpmapping, 0, 0x1000);
// if( i == 0xF )
// Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
MM_FreeTemp(tmpmapping);
// MAGIC_BREAK();
// #7 Return
- TMPCR3() = 0;
- INVLPG_ALL();
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
// Log("MM_Clone: RETURN %P", ret);
return ret;
}
void MM_ClearUser(void)
{
- MM_int_ClearTableLevel(0, 39, 256);
+ MM_int_ClearTableLevel(0, 39, 256);
}
tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
int i;
// #1 Set temp fractal to PID0
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING( ((tPAddr)gInitialPML4 - KERNEL_BASE) );
	// #2 Scan for a free stack address < 2^47
for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
if( !(*ptr & 1) ) break;
}
if( ret >= (1ULL << 47) ) {
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return 0;
}
Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
return 0;
}
- MM_MapEx(ret + i*0x1000, phys, 1, 0);
- MM_SetFlags(ret + i*0x1000, MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
+ MM_MapEx( (void*)(ret + i*0x1000), phys, 1, 0);
+ // XXX: ... this doesn't change the correct address space
+ MM_SetFlags( (void*)(ret + i*0x1000), MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
}
// Copy data
Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, cbf handling", StackSize);
}
else {
- tVAddr tmp_addr, dest;
+ void *tmp_addr, *dest;
tmp_addr = MM_MapTemp(phys);
- dest = tmp_addr + (0x1000 - StackSize);
- memcpy( (void*)dest, StackData, StackSize );
- Log_Debug("MM", "MM_NewWorkerStack: %p->%p %i bytes (i=%i)", StackData, dest, StackSize, i);
- Log_Debug("MM", "MM_NewWorkerStack: ret = %p", ret);
+ dest = (char*)tmp_addr + (0x1000 - StackSize);
+ memcpy( dest, StackData, StackSize );
MM_FreeTemp(tmp_addr);
}
- TMPCR3() = 0;
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return ret + i*0x1000;
}
Uint i;
for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
{
- if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
+ if(MM_GetPhysAddr( (void*)(base+KERNEL_STACK_SIZE-0x1000) ) != 0)
continue;
//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
{
- if( !MM_Allocate(base+i) )
+ if( !MM_Allocate( (void*)(base+i) ) )
{
Log_Warning("MM", "MM_NewKStack - Allocation failed");
for( i -= 0x1000; i; i -= 0x1000)
- MM_Deallocate(base+i);
+ MM_Deallocate((void*)(base+i));
return 0;
}
}