 * Virtual Memory Manager
#include <threads_int.h>
#define PHYS_BITS	52	// TODO: Move out
#define PADDR_MASK	0x7FFFFFFF##FFFFF000
#define PAGE_MASK	(((Uint)1 << 36)-1)
#define TABLE_MASK	(((Uint)1 << 27)-1)
#define PDP_MASK	(((Uint)1 << 18)-1)
#define PML4_MASK	(((Uint)1 << 9)-1)
#define PF_PRESENT	0x001
#define PF_WRITE	0x002
#define PF_USER		0x004
#define PF_LARGE	0x080	// PS (page size) bit
#define PF_COW		0x200	// Software-defined (AVL) bit
#define PF_PAGED	0x400	// Software-defined (AVL) bit
#define PF_NX		0x80000000##00000000
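
/*
 * Note on the PF_* bits: PF_PRESENT, PF_WRITE and PF_USER are the architectural
 * Present (bit 0), Read/Write (bit 1) and User/Supervisor (bit 2) bits of an
 * x86-64 paging entry, PF_LARGE is the PS bit (bit 7) and PF_NX is the
 * No-Execute bit (bit 63).  PF_COW and PF_PAGED live in the "available to
 * software" bits and are meaningful only to this kernel, not to the MMU.
 */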
#define PAGETABLE(idx)	(*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
#define PAGEDIR(idx)	PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
#define PAGEDIRPTR(idx)	PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
#define PAGEMAPLVL4(idx)	PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))

#define TMPCR3()	PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
#define TMPTABLE(idx)	(*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
#define TMPDIR(idx)	PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
#define TMPDIRPTR(idx)	PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
#define TMPMAPLVL4(idx)	PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
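
/*
 * How the fractal macros work: one PML4 slot is made to point back at the PML4
 * itself (slot 509, see MM_Clone below), so every paging structure of the
 * current address space is visible somewhere in the 512GiB window starting at
 * MM_FRACTAL_BASE.  Each additional shift walks one level up the tree.  A rough
 * sketch of the arithmetic, assuming `vaddr` has already been masked down to
 * the canonical 48 bits:
 *
 *	Uint64 pte   = PAGETABLE(vaddr >> 12);		// PTE covering vaddr
 *	Uint64 pde   = PAGEDIR(vaddr >> 21);		// PDE covering vaddr
 *	Uint64 pdpte = PAGEDIRPTR(vaddr >> 30);		// PDPTE covering vaddr
 *	Uint64 pml4e = PAGEMAPLVL4(vaddr >> 39);	// PML4E covering vaddr
 *
 * The TMP* variants use a second slot (510) that is pointed at *another*
 * address space's PML4; that is why TMPDIR/TMPDIRPTR/TMPMAPLVL4 are defined in
 * terms of the normal fractal macros - reaching a higher-level table of the
 * temporary space needs one less hop through slot 510.
 */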
#define INVLPG(__addr)	__asm__ __volatile__ ("invlpg (%0)" :: "r"(__addr) : "memory")
#define INVLPG_ALL()	__asm__ __volatile__ ("mov %%cr3, %%rax;\n\tmov %%rax, %%cr3;" ::: "rax", "memory")
#define INVLPG_GLOBAL()	__asm__ __volatile__ ("mov %%cr4, %%rax;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4" ::: "rax", "memory")
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
extern void Error_Backtrace(Uint IP, Uint BP);
extern tPAddr gInitialPML4[512];

void MM_InitVirt(void);
//void MM_FinishVirtualInit(void);
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_ClearUser(void);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);

tMutex	glMM_TempFractalLock;
void MM_InitVirt(void)
{
	MM_DumpTables(0, -1L);
}

void MM_FinishVirtualInit(void)
 * \brief Called on a page fault
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
	// TODO: Implement Copy-on-Write
	// NOTE: this COW path still uses the 32-bit gaPageDir/gaPageTable names and
	// the 32-bit 10/10/12 address split; it has not been ported to the 4-level macros above.
	if( gaPageDir  [Addr>>22] & PF_PRESENT
	 && gaPageTable[Addr>>12] & PF_PRESENT
	 && gaPageTable[Addr>>12] & PF_COW )
	{
		// Only one reference? Just make the page writable again
		if(MM_GetRefCount( gaPageTable[Addr>>12] & PADDR_MASK ) == 1)
		{
			gaPageTable[Addr>>12] &= ~PF_COW;
			gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
		}
		// Otherwise duplicate the page and reference the copy instead
		else
		{
			//Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
			paddr = MM_DuplicatePage( Addr );
			MM_DerefPhys( gaPageTable[Addr>>12] & PADDR_MASK );
			gaPageTable[Addr>>12] &= PF_USER;	// keep only the user bit
			gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
		}

		INVLPG( Addr & ~0xFFF );
		return ;
	}
	// If it was a user fault, warn and tell the thread handler
	Warning("%s %s %s memory%s",
		(ErrorCode&4?"User":"Kernel"),
		(ErrorCode&2?"write to":"read from"),
		(ErrorCode&1?"bad/locked":"non-present"),
		(ErrorCode&16?" (Instruction Fetch)":"")
		);
	Warning("User Pagefault: Instruction at %04x:%p accessed %p",
		Regs->CS, Regs->RIP, Addr);
	__asm__ __volatile__ ("sti");	// Restart IRQs
	// Threads_SegFault(Addr);
	// -- Check Error Code --
	if( ErrorCode & 8 )
		Warning("Reserved Bits Trashed!");
	Warning("%s %s %s memory%s",
		(ErrorCode&4?"User":"Kernel"),
		(ErrorCode&2?"write to":"read from"),
		(ErrorCode&1?"bad/locked":"non-present"),
		(ErrorCode&16?" (Instruction Fetch)":"")
		);

	Log("Code at %p accessed %p", Regs->RIP, Addr);
	// Print Stack Backtrace
	Error_Backtrace(Regs->RIP, Regs->RBP);

	MM_DumpTables(0, -1);

	__asm__ __volatile__ ("cli");
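
/*
 * The ErrorCode decoding above follows the x86 page-fault error code layout:
 * bit 0 = protection violation (1) vs. not-present (0), bit 1 = write access,
 * bit 2 = fault from user mode, bit 3 = reserved bit set in a paging entry
 * ("Reserved Bits Trashed"), bit 4 = instruction fetch.  A minimal sketch of
 * the same decode:
 *
 *	int is_user     = ErrorCode & 4;
 *	int is_write    = ErrorCode & 2;
 *	int was_present = ErrorCode & 1;
 *	int is_ifetch   = ErrorCode & 16;
 */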
 * \brief Dumps the layout of the page tables
void MM_DumpTables(tVAddr Start, tVAddr End)
	#define CANONICAL(addr)	((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
	const tPAddr	CHANGEABLE_BITS = ~(PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED) & 0xFFF;
	const tPAddr	MASK = ~CHANGEABLE_BITS;	// Physical address and access bits
	tVAddr	rangeStart = 0;
	tPAddr	expected = CHANGEABLE_BITS;	// CHANGEABLE_BITS is used because it's not a valid entry value

	Log("Table Entries: (%p to %p)", Start, End);

	End &= (1L << 48) - 1;
	Start >>= 12;	End >>= 12;
	for(page = Start, curPos = Start<<12;
		page < End;
		curPos += 0x1000, page++)
		if( curPos == 0x800000000000L )
			curPos = 0xFFFF800000000000L;	// Skip over the non-canonical hole

		//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
		//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
		//Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
		//Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
		if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
		|| !(PAGEDIRPTR(page>>18) & PF_PRESENT)
		|| !(PAGEDIR(page>>9) & PF_PRESENT)
		|| !(PAGETABLE(page) & PF_PRESENT)
		|| (PAGETABLE(page) & MASK) != expected)
		{
			if(expected != CHANGEABLE_BITS) {
				Log("%016llx => %013llx : 0x%6llx (%c%c%c%c)",
					CANONICAL(rangeStart),
					PAGETABLE(rangeStart>>12) & PADDR_MASK,
					curPos - rangeStart,
					(expected & PF_PAGED ? 'p' : '-'),
					(expected & PF_COW ? 'C' : '-'),
					(expected & PF_USER ? 'U' : '-'),
					(expected & PF_WRITE ? 'W' : '-')
					);
				expected = CHANGEABLE_BITS;
			}
			if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
				page += (1 << 27) - 1;
				curPos += (1L << 39) - 0x1000;
				//Debug("pml4 ent unset (page = 0x%x now)", page);
				continue;
			}
			if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
				page += (1 << 18) - 1;
				curPos += (1L << 30) - 0x1000;
				//Debug("pdp ent unset (page = 0x%x now)", page);
				continue;
			}
			if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
				page += (1 << 9) - 1;
				curPos += (1L << 21) - 0x1000;
				//Debug("pd ent unset (page = 0x%x now)", page);
				continue;
			}
			if( !(PAGETABLE(page) & PF_PRESENT) )	continue;

			expected = (PAGETABLE(page) & MASK);
			rangeStart = curPos;
		}
		if(expected != CHANGEABLE_BITS)
			expected += 0x1000;	// Next page in the current contiguous run
	if(expected != CHANGEABLE_BITS) {
		Log("%016llx => %013llx : 0x%6llx (%c%c%c%c)",
			CANONICAL(rangeStart),
			PAGETABLE(rangeStart>>12) & PADDR_MASK,
			curPos - rangeStart,
			(expected & PF_PAGED ? 'p' : '-'),
			(expected & PF_COW ? 'C' : '-'),
			(expected & PF_USER ? 'U' : '-'),
			(expected & PF_WRITE ? 'W' : '-')
			);
	}
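
/*
 * Each range printed above has the form
 *	<virtual start> => <physical start> : <length> (pCUW)
 * where the flag letters mean paged / copy-on-write / user / writable, e.g.
 * (illustrative values only):
 *	ffffffff80000000 => 0000000200000 : 0x 80000 (---W)
 */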
 * \brief Get a pointer to a page entry
 * \param Addr	Virtual Address
 * \param bTemp	Use the temporary fractal mapping
 * \param bAllocate	Allocate entries as needed
 * \param bLargePage	Request a large page
 * \param Pointer	Location to place the calculated pointer
 * \return Page size in address bits, or negative on error
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
	const int	ADDR_SIZES[] = {39, 30, 21, 12};
	const int	nADDR_SIZES = sizeof(ADDR_SIZES)/sizeof(ADDR_SIZES[0]);
	if( bTemp )
	{
		pmlevels[3] = &TMPTABLE(0);	// Page Table
		pmlevels[2] = &TMPDIR(0);	// PDIR
		pmlevels[1] = &TMPDIRPTR(0);	// PDPT
		pmlevels[0] = &TMPMAPLVL4(0);	// PML4
	}
	else
	{
		pmlevels[3] = (void*)MM_FRACTAL_BASE;	// Page Table
		pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&PAGE_MASK];	// PDIR
		pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&TABLE_MASK];	// PDPT
		pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&PDP_MASK];	// PML4
	}

	// Mask the address down to the canonical 48 bits
	Addr &= (1ULL << 48)-1;
	for( i = 0; i < nADDR_SIZES-1; i ++ )
	{
//		INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &

		// Check for a large page
		if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) == 0 && bLargePage )
		{
			if(Pointer)	*Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
			return ADDR_SIZES[i];
		}
		// Allocate an entry if required
		if( !(pmlevels[i][Addr >> ADDR_SIZES[i]] & 1) )
		{
			if( !bAllocate )	return -4;	// If allocation is not requested, error
			tmp = MM_AllocPhys();
			pmlevels[i][Addr >> ADDR_SIZES[i]] = tmp | 3;
			INVLPG( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ] );
			memset( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ], 0, 0x1000 );
		}
		// Catch large pages
		else if( pmlevels[i][Addr >> ADDR_SIZES[i]] & PF_LARGE )
		{
			// Alignment check
			if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) != 0 )	return -3;
			if(Pointer)	*Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
			return ADDR_SIZES[i];	// Large page warning
		}
	}

	// And, set the page table entry
	if(Pointer)	*Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
	return ADDR_SIZES[i];
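
/*
 * Typical use of MM_GetPageEntryPtr(): obtain a pointer to the page entry and
 * manipulate it directly.  A minimal sketch, assuming a normal 4KiB mapping and
 * with `vaddr`/`paddr` as hypothetical inputs:
 *
 *	tPAddr	*ent;
 *	int	size = MM_GetPageEntryPtr(vaddr, 0, 1, 0, &ent);	// allocate tables
 *	if( size == 12 && !(*ent & PF_PRESENT) )
 *		*ent = (paddr & PADDR_MASK) | PF_PRESENT | PF_WRITE;
 *
 * MM_MapEx() below is essentially the checked wrapper around this pattern.
 */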
 * \brief Map a physical page to a virtual one
 * \param VAddr	Target virtual address
 * \param PAddr	Physical address of page
 * \param bTemp	Use temporary mappings
 * \param bLarge	Treat as a large page
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
	ENTER("xVAddr xPAddr", VAddr, PAddr);

	// Get page pointer (allow allocating of tables)
	rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
	if(rv < 0)	LEAVE_RET('i', 0);

	// Refuse to overwrite an existing mapping
	if( *ent & 1 )	LEAVE_RET('i', 0);
 * \brief Map a physical page to a virtual one
 * \param VAddr	Target virtual address
 * \param PAddr	Physical address of page
int MM_Map(tVAddr VAddr, tPAddr PAddr)
	return MM_MapEx(VAddr, PAddr, 0, 0);
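
/*
 * Example use of MM_Map() (sketch; `dest_vaddr` is a hypothetical target
 * address).  Note that MM_MapEx() refuses to overwrite an existing mapping, so
 * a zero return can mean "already mapped" as well as "out of memory":
 *
 *	tPAddr frame = MM_AllocPhys();
 *	if( frame && !MM_Map(dest_vaddr, frame) )
 *		MM_DerefPhys(frame);	// mapping failed, release the frame
 */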
 * \brief Remove a mapped page
void MM_Unmap(tVAddr VAddr)
	if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )	return ;
	if( !(PAGEDIRPTR(VAddr >> 30) & 1) )	return ;
	if( !(PAGEDIR(VAddr >> 21) & 1) )	return ;

	PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
 * \brief Allocate a block of memory at the specified virtual address
tPAddr MM_Allocate(tVAddr VAddr)
	ENTER("xVAddr", VAddr);

	// Ensure the tables are allocated before the page (keeps things neat)
	MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);

	ret = MM_AllocPhys();
	LOG("ret = %x", ret);
	if(!ret)	LEAVE_RET('i', 0);

	if( !MM_Map(VAddr, ret) )
		Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
 * \brief Deallocate a page at a virtual address
void MM_Deallocate(tVAddr VAddr)
	phys = MM_GetPhysAddr(VAddr);
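
/*
 * MM_Allocate()/MM_Deallocate() are the usual way to back a kernel virtual
 * address with anonymous memory; the physical frame is chosen internally by
 * MM_AllocPhys().  Sketch (`buf_addr` is purely illustrative):
 *
 *	if( !MM_Allocate(buf_addr) )
 *		return 0;	// out of physical memory
 *	// ... use the page ...
 *	MM_Deallocate(buf_addr);
 */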
 * \brief Get the page table entry of a virtual address
 * \param Addr	Virtual Address
 * \param Phys	Location to put the physical address
 * \param Flags	Flags on the entry (set to zero if unmapped)
 * \return Size of the entry (in address bits) - 12 = 4KiB page
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
	if(!Phys || !Flags)	return 0;

	ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
	if( ret < 0 )	return 0;

	*Phys = *ptr & PADDR_MASK;
	*Flags = *ptr & 0xFFF;
 * \brief Get the physical address of a virtual location
tPAddr MM_GetPhysAddr(tVAddr Addr)
	ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
	if( ret < 0 )	return 0;

	if( !(*ptr & 1) )	return 0;

	return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
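
/*
 * MM_GetPhysAddr() combines the frame address from the page entry with the
 * offset within the page; for a 4KiB mapping (illustrative values):
 *
 *	virt = 0xFFFFFFFF80123ABC
 *	PTE  = 0x0000000000200003	(frame 0x200000, Present|Write)
 *	result = 0x200000 | 0xABC = 0x200ABC
 *
 * It returns 0 both for unmapped addresses and for addresses that genuinely map
 * to physical page 0, so callers mostly use it as an "is this mapped" test
 * (see MM_MapHWPages, MM_NewWorkerStack and MM_NewKStack below).
 */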
 * \brief Sets the flags on a page
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);

	// Ensure the entry is valid
	if( !(*ent & 1) )	return ;

	if( Mask & MM_PFLAG_RO )
		if( Flags & MM_PFLAG_RO ) {

	if( Mask & MM_PFLAG_KERNEL )
		if( Flags & MM_PFLAG_KERNEL ) {

	if( Mask & MM_PFLAG_COW )
		if( Flags & MM_PFLAG_COW ) {

	if( Mask & MM_PFLAG_EXEC )
		if( Flags & MM_PFLAG_EXEC ) {
 * \brief Get the flags applied to a page
Uint MM_GetFlags(tVAddr VAddr)
	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);

	if( !(*ent & 1) )	return 0;

	if( !(*ent & PF_WRITE) )	ret |= MM_PFLAG_RO;
	if( !(*ent & PF_USER) )	ret |= MM_PFLAG_KERNEL;
	if( *ent & PF_COW )	ret |= MM_PFLAG_COW;
	if( !(*ent & PF_NX) )	ret |= MM_PFLAG_EXEC;
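
/*
 * Example of the flag interface (sketch; `addr` is a hypothetical mapped page).
 * Mask selects which flags to change, Flags gives their new values:
 *
 *	// Make the page read-only and non-executable
 *	MM_SetFlags(addr, MM_PFLAG_RO, MM_PFLAG_RO|MM_PFLAG_EXEC);
 *
 *	if( MM_GetFlags(addr) & MM_PFLAG_COW )
 *		;	// page is copy-on-write
 */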
// --- Hardware Mappings ---
 * \brief Map a range of hardware pages
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
	// TODO: Add speedups (remember the first possibly-free slot)
	for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
		// Check whether `Number` contiguous slots starting at `ret` are free
		for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
			if( MM_GetPhysAddr(ret) != 0 )	break;
		// A used page was hit; try the next window
		if( num >= 0 )	continue;

		PAddr += 0x1000 * Number;

	Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
 * \brief Free a range of hardware pages
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
//	Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages	Number of pages required
 * \param MaxBits	Maximum number of bits the physical address can have
 * \param PhysAddr	Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
	if(MaxBits < 12 || !PhysAddr)	return 0;
	// Fast path: a single page with no usable address-width constraint
	if(Pages == 1 && MaxBits >= PHYS_BITS)
		phys = MM_AllocPhys();
		ret = MM_MapHWPages(phys, 1);

	// Otherwise allocate a contiguous physical range under the bit limit
	phys = MM_AllocPhysRange(Pages, MaxBits);
	// - Was it allocated?
	if(phys == 0)	return 0;

	// Allocated successfully, now map
	ret = MM_MapHWPages(phys, Pages);

	// If it didn't map, free then return 0
	for(;Pages--;phys+=0x1000)
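
/*
 * Example: a driver needing a single 32-bit addressable DMA buffer (sketch;
 * error handling beyond the NULL check is elided):
 *
 *	tPAddr	buf_phys;
 *	tVAddr	buf = MM_AllocDMA(1, 32, &buf_phys);
 *	if( !buf )	return 0;
 *	// `buf` is the kernel-visible mapping, `buf_phys` is given to the device
 */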
// --- Temporary Mappings ---
tVAddr MM_MapTemp(tPAddr PAddr)
	const int	max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
	tVAddr	ret = MM_TMPMAP_BASE;

	for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
		if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {

void MM_FreeTemp(tVAddr VAddr)
	MM_Deallocate(VAddr);
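
/*
 * MM_MapTemp()/MM_FreeTemp() provide a short-lived kernel window onto an
 * arbitrary physical page; the usual pattern (also used by MM_Clone below to
 * copy the kernel stack) is:
 *
 *	tVAddr tmp = MM_MapTemp(phys);
 *	memcpy( (void*)tmp, src, 0x1000 );
 *	MM_FreeTemp(tmp);
 */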
// --- Address Space Clone ---
tPAddr MM_Clone(void)
	// tThread->KernelStack is the top
	// There is 1 guard page below the stack
	kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE + 0x1000;

	Log("MM_Clone: kstackbase = %p", kstackbase);

	// #1 Create a copy of the PML4
	ret = MM_AllocPhys();

	// #2 Alter the fractal pointer
	Mutex_Acquire(&glMM_TempFractalLock);

	// #3 Set Copy-On-Write to all user pages
	for( i = 0; i < 256; i ++)
		TMPMAPLVL4(i) = PAGEMAPLVL4(i);
		// Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
		if( TMPMAPLVL4(i) & 1 )
		{
			MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
			TMPMAPLVL4(i) |= PF_COW;
			TMPMAPLVL4(i) &= ~PF_WRITE;
		}

	// #4 Map in kernel pages
	for( i = 256; i < 512; i ++ )
		// Skip the per-process slots:
		// 320 0xFFFFA00000000000 - Kernel Stacks
		if( i == 320 )	continue;
		// 509 0xFFFFFE8000000000 - Fractal mapping
		if( i == 509 )	continue;
		// 510 0xFFFFFF0000000000 - Temp fractal mapping
		if( i == 510 )	continue;
		TMPMAPLVL4(i) = PAGEMAPLVL4(i);
		if( TMPMAPLVL4(i) & 1 )
			MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );

	// #5 Set fractal mapping
	TMPMAPLVL4(509) = ret | 3;
	TMPMAPLVL4(510) = 0;	// Temp slot is not inherited

	// #6 Create kernel stack (-1 to account for the guard page)
	for( i = 0; i < KERNEL_STACK_SIZE/0x1000-1; i ++ )
		tPAddr phys = MM_AllocPhys();
		MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);

		// Copy the current stack contents into the new page
		tmpmapping = MM_MapTemp(phys);
		memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
		MM_FreeTemp(tmpmapping);

	Mutex_Release(&glMM_TempFractalLock);
	Log("MM_Clone: RETURN %P", ret);
void MM_ClearUser(void)
	int	pml4, pdpt, pd, pt;

	for( pml4 = 0; pml4 < 256; pml4 ++ )
		// Catch an un-allocated PML4 entry
		if( !(PAGEMAPLVL4(pml4) & 1) ) {
			addr += 1ULL << PML4_SHIFT;

		if( (PAGEMAPLVL4(pml4) & PF_COW) ) {
			addr += 1ULL << PML4_SHIFT;

		for( pdpt = 0; pdpt < 512; pdpt ++ )
			if( !(PAGEDIRPTR(addr >> PDP_SHIFT) & 1) ) {
				addr += 1ULL << PDP_SHIFT;

			if( (PAGEDIRPTR(addr >> PDP_SHIFT) & PF_COW) ) {
				addr += 1ULL << PDP_SHIFT;

			for( pd = 0; pd < 512; pd ++ )
				// Unallocated PDir entry
				if( !(PAGEDIR(addr >> PDIR_SHIFT) & 1) ) {
					addr += 1ULL << PDIR_SHIFT;

				if( PAGEDIR(addr >> PDIR_SHIFT) & PF_COW ) {
					addr += 1ULL << PDIR_SHIFT;

				// TODO: Catch large pages

				for( pt = 0; pt < 512; pt ++ )
					if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
						MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
						PAGETABLE(addr >> PTAB_SHIFT) = 0;

				MM_DerefPhys( PAGEDIR(addr >> PDIR_SHIFT) & PADDR_MASK );
				PAGEDIR(addr >> PDIR_SHIFT) = 0;

			// Free page directory
			MM_DerefPhys( PAGEDIRPTR(addr >> PDP_SHIFT) & PADDR_MASK );
			PAGEDIRPTR(addr >> PDP_SHIFT) = 0;

		// Free page directory pointer table (PML4 entry)
		MM_DerefPhys( PAGEMAPLVL4(pml4) & PADDR_MASK );
		PAGEMAPLVL4(pml4) = 0;
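
/*
 * Only PML4 entries 0-255 are touched here: the lower canonical half (user
 * space) is exactly the first 256 PML4 slots, so the kernel half set up by
 * MM_Clone() is left alone.  Entries marked PF_COW are still shared with the
 * parent and are skipped rather than recursed into.
 */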
tVAddr MM_NewWorkerStack(void)
	// #1 Set temp fractal to PID0's address space
	Mutex_Acquire(&glMM_TempFractalLock);
	TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;

	// #2 Scan for a free stack address below 2^47
	for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
		if( MM_GetPhysAddr(ret) == 0 )	break;

	if( ret >= (1ULL << 47) ) {
		Mutex_Release(&glMM_TempFractalLock);
		return 0;
	}

	// #3 Map all but the last page in the range
	// - The unmapped page acts as a guard page, and doesn't cost us anything.
	for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
		tPAddr phys = MM_AllocPhys();
		if( !phys )
			Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
		MM_MapEx(ret + i*0x1000, phys, 1, 0);

	Mutex_Release(&glMM_TempFractalLock);

	return ret + i*0x1000;
 * \brief Allocate a new kernel stack
tVAddr MM_NewKStack(void)
	tVAddr	base = MM_KSTACK_BASE;

	for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
		// Skip stacks that are already in use
		if(MM_GetPhysAddr(base) != 0)
			continue;

		//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
		for( i = 0; i < KERNEL_STACK_SIZE; i += 0x1000)
			if( !MM_Allocate(base+i) )
			{
				Log_Warning("MM", "MM_NewKStack - Allocation failed");
				for( i -= 0x1000; i; i -= 0x1000)
					MM_Deallocate(base+i);
				return 0;
			}

		return base + KERNEL_STACK_SIZE;

	Log_Warning("MM", "MM_NewKStack - No address space left");