4 * Virtual Memory Manager
9 #include <threads_int.h>
13 #define PHYS_BITS 52 // TODO: Move out
// Mask selecting the physical-frame address from a 64-bit page-table entry
// (bits 12..62; bit 63 is NX, bits 0..11 are flags). Built with ## token
// pasting to form the 64-bit literal.
21 #define PADDR_MASK 0x7FFFFFFF##FFFFF000
// Index masks for the fractal (recursive) page-table mapping of a 48-bit
// virtual space: 2^36 PTEs, 2^27 PDEs, 2^18 PDPTEs, 2^9 PML4Es.
22 #define PAGE_MASK ((1LL << 36)-1)
23 #define TABLE_MASK ((1LL << 27)-1)
24 #define PDP_MASK ((1LL << 18)-1)
25 #define PML4_MASK ((1LL << 9)-1)
// Hardware page-table entry flag bits (Intel SDM Vol 3, 4-level paging)
27 #define PF_PRESENT 0x001
28 #define PF_WRITE 0x002
30 #define PF_LARGE 0x080
31 #define PF_GLOBAL 0x100
// Software-defined (AVL) bits used by this kernel.
// NOTE(review): PF_COW and PF_USER are referenced below but defined
// elsewhere — presumably PF_USER=0x004 and PF_COW in bits 9-11; confirm.
33 #define PF_PAGED 0x400
34 #define PF_NX 0x80000000##00000000
// Fractal-mapping accessors: with one PML4 slot pointing back at the PML4
// itself, every level of the current address space's tables appears inside
// the window at MM_FRACTAL_BASE. (idx) is the table index at that level.
37 #define PAGETABLE(idx) (*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
38 #define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
39 #define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
40 #define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
// Second ("temporary") fractal window at MM_TMPFRAC_BASE, used to edit a
// DIFFERENT address space's tables (see MM_Clone / MM_NewWorkerStack).
// TMPCR3() is the PML4 slot that selects which space the window shows.
// Note the temp window's own upper-level tables are reached through the
// primary PAGETABLE/PAGEDIR macros — this is intentional, not a typo.
42 #define TMPCR3() PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
43 #define TMPTABLE(idx) (*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
44 #define TMPDIR(idx) PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
45 #define TMPDIRPTR(idx) PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
46 #define TMPMAPLVL4(idx) PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
// TLB maintenance: single-page invalidate, full flush via CR3 reload
// (leaves global pages), and full flush including globals by toggling
// CR4.PGE (bit 7, 0x80) off and on again.
48 #define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr))
49 #define INVLPG_ALL() __asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;")
50 #define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4")
53 //tPAddr * const gaPageTable = MM_FRACTAL_BASE;
// External symbols from the debug, boot-paging and threading subsystems
56 extern void Error_Backtrace(Uint IP, Uint BP);
57 extern tPAddr gInitialPML4[512];
58 extern void Threads_SegFault(tVAddr Addr);
// Local prototypes
61 void MM_InitVirt(void);
62 //void MM_FinishVirtualInit(void);
63 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
64 void MM_DumpTables(tVAddr Start, tVAddr End);
65 int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
66 int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
67 // int MM_Map(tVAddr VAddr, tPAddr PAddr);
68 void MM_Unmap(tVAddr VAddr);
69 void MM_ClearUser(void);
70 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// Serialises all use of the temporary fractal window (TMP* macros above)
73 tMutex glMM_TempFractalLock;
/**
 * \brief Initialise the virtual memory manager.
 * NOTE(review): only the signature and a disabled debug dump are visible
 * in this view; the body is elided.
 */
76 void MM_InitVirt(void)
78 // MM_DumpTables(0, -1L);
/**
 * \brief Late-stage virtual memory initialisation.
 * NOTE(review): body not visible here — presumably tears down boot-time
 * mappings once the kernel is fully relocated; confirm against full source.
 */
81 void MM_FinishVirtualInit(void)
87 * \brief Called on a page fault
/**
 * \brief Page fault (#PF) handler.
 * \param Addr      Faulting virtual address (from CR2)
 * \param ErrorCode Hardware #PF error code (bit0=present, bit1=write,
 *                  bit2=user, bit3=reserved-bit, bit4=instruction fetch)
 * \param Regs      Saved register state at the fault
 */
89 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
91 // TODO: Implement Copy-on-Write
// --- Copy-on-Write ---
// A COW fault is recognised when every paging level is present and the
// final PTE carries the PF_COW software flag.
93 if( PAGEMAPLVL4(Addr>>39) & PF_PRESENT
94 && PAGEDIRPTR (Addr>>30) & PF_PRESENT
95 && PAGEDIR (Addr>>21) & PF_PRESENT
96 && PAGETABLE (Addr>>12) & PF_PRESENT
97 && PAGETABLE (Addr>>12) & PF_COW )
// Sole owner of the frame: no copy needed, just clear COW and
// restore write permission in place.
100 if(MM_GetRefCount( PAGETABLE(Addr>>12) & PADDR_MASK ) == 1)
102 PAGETABLE(Addr>>12) &= ~PF_COW;
103 PAGETABLE(Addr>>12) |= PF_PRESENT|PF_WRITE;
// Frame is shared: duplicate it into a freshly allocated frame.
107 //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
108 paddr = MM_AllocPhys();
// Allocation failure path: deliver a segfault to the thread
110 Threads_SegFault(Addr);
// Copy the old page's contents through a temporary mapping of the
// new frame (the old page is still readable at Addr).
114 void *tmp = (void*)MM_MapTemp(paddr);
115 memcpy( tmp, (void*)(Addr & ~0xFFF), 0x1000 );
116 MM_FreeTemp( (tVAddr)tmp );
// Drop our reference on the shared frame, then retarget the PTE at
// the private copy. `&= PF_USER` deliberately keeps ONLY the user
// bit, discarding the old frame address and the PF_COW flag.
118 MM_DerefPhys( PAGETABLE(Addr>>12) & PADDR_MASK );
119 PAGETABLE(Addr>>12) &= PF_USER;
120 PAGETABLE(Addr>>12) |= paddr|PF_PRESENT|PF_WRITE;
// Flush the stale TLB entry for the faulting page
123 INVLPG( Addr & ~0xFFF );
128 // If it was a user, tell the thread handler
// --- User-mode fault: log a description decoded from the error code,
// re-enable interrupts, and segfault the offending thread. ---
130 Warning("User %s %s memory%s",
131 (ErrorCode&2?"write to":"read from"),
132 (ErrorCode&1?"bad/locked":"non-present"),
133 (ErrorCode&16?" (Instruction Fetch)":"")
135 Warning("User Pagefault: Instruction at %04x:%p accessed %p",
136 Regs->CS, Regs->RIP, Addr);
137 __asm__ __volatile__ ("sti"); // Restart IRQs
138 Threads_SegFault(Addr);
144 // -- Check Error Code --
// --- Kernel-mode fault: unrecoverable. Log details, backtrace, dump
// the page tables, then halt with interrupts disabled. ---
146 Warning("Reserved Bits Trashed!");
149 Warning("Kernel %s %s memory%s",
150 (ErrorCode&2?"write to":"read from"),
151 (ErrorCode&1?"bad/locked":"non-present"),
152 (ErrorCode&16?" (Instruction Fetch)":"")
156 Log("Code at %p accessed %p", Regs->RIP, Addr);
157 // Print Stack Backtrace
158 Error_Backtrace(Regs->RIP, Regs->RBP);
160 MM_DumpTables(0, -1);
162 __asm__ __volatile__ ("cli");
168 * \brief Dumps the layout of the page tables
/**
 * \brief Dump the page-table layout between two virtual addresses,
 *        coalescing runs of pages with identical attributes into ranges.
 * \param Start First virtual address
 * \param End   Last virtual address (inclusive; -1 for whole space)
 */
170 void MM_DumpTables(tVAddr Start, tVAddr End)
// (sic: "CANOICAL") Sign-extend a 48-bit address to canonical 64-bit form
172 #define CANOICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
// CHANGEABLE_BITS = the low flag bits NOT considered part of a range's
// identity; MASK = frame address + the attribute bits that must match
// for two adjacent pages to be coalesced into one printed range.
173 const tPAddr CHANGEABLE_BITS = ~(PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED) & 0xFFF;
174 const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
175 tVAddr rangeStart = 0;
176 tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a vaild value
180 Log("Table Entries: (%p to %p)", Start, End);
// Work in 48-bit space, one 4KiB page index at a time
182 End &= (1L << 48) - 1;
184 Start >>= 12; End >>= 12;
186 for(page = Start, curPos = Start<<12;
188 curPos += 0x1000, page++)
// Jump over the non-canonical hole in the middle of the 48-bit space
190 if( curPos == 0x800000000000L )
191 curPos = 0xFFFF800000000000L;
193 //Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
194 //Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
195 //Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
196 //Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
// End of the current range: some level is absent, or the attributes
// of this page differ from those of the range being accumulated.
199 if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
200 || !(PAGEDIRPTR(page>>18) & PF_PRESENT)
201 || !(PAGEDIR(page>>9) & PF_PRESENT)
202 || !(PAGETABLE(page) & PF_PRESENT)
203 || (PAGETABLE(page) & MASK) != expected)
// Flush the accumulated range, if any (p=paged, C=COW, U=user, W=write)
205 if(expected != CHANGEABLE_BITS)
207 Log("%016llx => %13llx : 0x%6llx (%c%c%c%c)",
208 CANOICAL(rangeStart),
209 PAGETABLE(rangeStart>>12) & PADDR_MASK,
211 (expected & PF_PAGED ? 'p' : '-'),
212 (expected & PF_COW ? 'C' : '-'),
213 (expected & PF_USER ? 'U' : '-'),
214 (expected & PF_WRITE ? 'W' : '-')
216 expected = CHANGEABLE_BITS;
// Skip entire absent subtrees in one step, advancing the page index
// and virtual cursor by the span of the missing level (the -1 /
// -0x1000 compensate for the loop's own increment).
218 if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
219 page += (1 << 27) - 1;
220 curPos += (1L << 39) - 0x1000;
221 //Debug("pml4 ent unset (page = 0x%x now)", page);
224 if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
225 page += (1 << 18) - 1;
226 curPos += (1L << 30) - 0x1000;
227 //Debug("pdp ent unset (page = 0x%x now)", page);
230 if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
231 page += (1 << 9) - 1;
232 curPos += (1L << 21) - 0x1000;
233 //Debug("pd ent unset (page = 0x%x now)", page);
236 if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
// Start a new range with this page's attributes
238 expected = (PAGETABLE(page) & MASK);
241 if(expected != CHANGEABLE_BITS)
// Flush the final range after the loop ends
245 if(expected != CHANGEABLE_BITS) {
246 Log("%016llx => %13llx : 0x%6llx (%c%c%c%c)",
247 CANOICAL(rangeStart),
248 PAGETABLE(rangeStart>>12) & PADDR_MASK,
250 (expected & PF_PAGED ? 'p' : '-'),
251 (expected & PF_COW ? 'C' : '-'),
252 (expected & PF_USER ? 'U' : '-'),
253 (expected & PF_WRITE ? 'W' : '-')
261 * \brief Get a pointer to a page entry
262 * \param Addr Virtual Address
263 * \param bTemp Use the Temporary fractal mapping
264 * \param bAllocate Allocate entries
265 * \param bLargePage Request a large page
266 * \param Pointer Location to place the calculated pointer
267 * \return Page size, or -ve on error
/**
 * \brief Get a pointer to the page-table entry covering \a Addr, walking
 *        (and optionally building) the paging hierarchy via the fractal
 *        mapping.
 * \param Addr       Virtual address to look up
 * \param bTemp      Use the temporary fractal window (other address space)
 * \param bAllocate  Allocate missing intermediate tables
 * \param bLargePage Stop at a higher level if a large page is possible
 * \param Pointer    Out: pointer to the located entry
 * \return Page-size shift of the entry (12/21/30/39), or negative on error
 *         (-2 alloc failed, -3 misaligned large page, -4 table absent)
 */
269 int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
// Address-shift for each level, PML4 first (512GiB/1GiB/2MiB/4KiB)
273 const int ADDR_SIZES[] = {39, 30, 21, 12};
274 const int nADDR_SIZES = sizeof(ADDR_SIZES)/sizeof(ADDR_SIZES[0]);
277 #define BITMASK(bits) ( (1LL << (bits))-1 )
// Table base pointers, per level. bTemp selects the temporary fractal
// window; otherwise the primary one, with each higher level located
// inside the level below (recursive-mapping layout).
281 pmlevels[3] = &TMPTABLE(0); // Page Table
282 pmlevels[2] = &TMPDIR(0); // PDIR
283 pmlevels[1] = &TMPDIRPTR(0); // PDPT
284 pmlevels[0] = &TMPMAPLVL4(0); // PML4
288 pmlevels[3] = (void*)MM_FRACTAL_BASE; // Page Table
289 pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&BITMASK(VIRT_BITS-12)]; // PDIR
290 pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&BITMASK(VIRT_BITS-21)]; // PDPT
291 pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&BITMASK(VIRT_BITS-30)]; // PML4
// Mask to the 48-bit (non-sign-extended) form for index arithmetic
295 Addr &= (1ULL << 48)-1;
// Walk down the hierarchy; the final (4KiB) level is handled after
// the loop by the common return at the bottom.
297 for( i = 0; i < nADDR_SIZES-1; i ++ )
299 // INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &
301 // Check for a large page
// If a large page was requested and Addr is aligned to this level's
// page size, return this level's entry directly.
302 if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) == 0 && bLargePage )
304 if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
305 return ADDR_SIZES[i];
307 // Allocate an entry if required
308 if( !(pmlevels[i][Addr >> ADDR_SIZES[i]] & 1) )
310 if( !bAllocate ) return -4; // If allocation is not requested, error
311 if( !(tmp = MM_AllocPhys()) ) return -2;
// New table: present+writable; user-accessible when below the
// kernel half of the address space.
312 pmlevels[i][Addr >> ADDR_SIZES[i]] = tmp | 3;
313 if( Addr < 0x800000000000 )
314 pmlevels[i][Addr >> ADDR_SIZES[i]] |= PF_USER;
// Invalidate and zero the new table through its fractal alias
315 INVLPG( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ] );
316 memset( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ], 0, 0x1000 );
317 LOG("Init PML%i ent 0x%x %p with %P", 4 - i,
319 (Addr>>ADDR_SIZES[i])<<ADDR_SIZES[i], tmp);
// An existing large page blocks descent: only usable if Addr is
// aligned to it, otherwise error out.
322 else if( pmlevels[i][Addr >> ADDR_SIZES[i]] & PF_LARGE )
325 if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) != 0 ) return -3;
326 if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
327 return ADDR_SIZES[i]; // Large page warning
331 // And, set the page table entry
332 if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
333 return ADDR_SIZES[i];
337 * \brief Map a physical page to a virtual one
338 * \param VAddr Target virtual address
339 * \param PAddr Physical address of page
340 * \param bTemp Use tempoary mappings
341 * \param bLarge Treat as a large page
/**
 * \brief Map a physical page at a virtual address.
 * \param VAddr Target virtual address
 * \param PAddr Physical frame to map
 * \param bTemp Operate on the temporary fractal (other address space)
 * \param bLarge Map as a large page
 * \return Boolean success (0 on failure)
 */
343 int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
348 ENTER("xVAddr xPAddr", VAddr, PAddr);
350 // Get page pointer (Allow allocating)
351 rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
352 if(rv < 0) LEAVE_RET('i', 0);
// Refuse to overwrite an already-present mapping
354 if( *ent & 1 ) LEAVE_RET('i', 0);
// User-half addresses get the user-accessible bit (elided lines
// presumably set *ent and PF_USER here — confirm against full source)
358 if( VAddr < 0x800000000000 )
368 * \brief Map a physical page to a virtual one
369 * \param VAddr Target virtual address
370 * \param PAddr Physical address of page
/**
 * \brief Map a physical page at a virtual address in the current
 *        address space (normal 4KiB page); wrapper over MM_MapEx.
 * \param VAddr Target virtual address
 * \param PAddr Physical frame to map
 * \return Boolean success (see MM_MapEx)
 */
372 int MM_Map(tVAddr VAddr, tPAddr PAddr)
374 return MM_MapEx(VAddr, PAddr, 0, 0);
378 * \brief Removed a mapped page
/**
 * \brief Remove the mapping of a virtual address.
 * Walks each paging level through the fractal mapping and bails out
 * early if any level is absent; otherwise clears the final PTE.
 * NOTE(review): no TLB invalidation is visible in this view — confirm
 * an INVLPG happens on an elided line or at the caller.
 */
380 void MM_Unmap(tVAddr VAddr)
383 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
385 if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
387 if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
389 PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
394 * \brief Allocate a block of memory at the specified virtual address
/**
 * \brief Allocate a physical page and map it at the given virtual address.
 * \param VAddr Virtual address to back with a fresh frame
 * \return The physical address allocated, or 0 on failure
 */
396 tPAddr MM_Allocate(tVAddr VAddr)
400 ENTER("xVAddr", VAddr);
402 // Ensure the tables are allocated before the page (keeps things neat)
403 MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
// Grab a frame; bail out if physical memory is exhausted
406 ret = MM_AllocPhys();
407 LOG("ret = %x", ret);
408 if(!ret) LEAVE_RET('i', 0);
// Mapping should only fail if VAddr was already mapped
410 if( !MM_Map(VAddr, ret) )
412 Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
423 * \brief Deallocate a page at a virtual address
/**
 * \brief Free the page mapped at a virtual address.
 * Looks up the backing frame; elided lines presumably unmap the page
 * and drop the frame's reference count — confirm against full source.
 */
425 void MM_Deallocate(tVAddr VAddr)
429 phys = MM_GetPhysAddr(VAddr);
438 * \brief Get the page table entry of a virtual address
439 * \param Addr Virtual Address
440 * \param Phys Location to put the physical address
441 * \param Flags Flags on the entry (set to zero if unmapped)
442 * \return Size of the entry (in address bits) - 12 = 4KiB page
/**
 * \brief Read the page-table entry covering a virtual address.
 * \param Addr  Virtual address
 * \param Phys  Out: physical frame address from the entry
 * \param Flags Out: low 12 flag bits of the entry
 * \return Entry size in address bits (12 = 4KiB), 0 on error/unmapped
 */
444 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
// Both output pointers are required
449 if(!Phys || !Flags) return 0;
// Read-only lookup (no allocation, no large-page request)
451 ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
452 if( ret < 0 ) return 0;
454 *Phys = *ptr & PADDR_MASK;
455 *Flags = *ptr & 0xFFF;
460 * \brief Get the physical address of a virtual location
/**
 * \brief Translate a virtual address to its physical address.
 * \param Addr Virtual address
 * \return Physical address (frame base + page offset), or 0 if unmapped
 */
462 tPAddr MM_GetPhysAddr(tVAddr Addr)
467 ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
468 if( ret < 0 ) return 0;
// Entry exists but may not be present
470 if( !(*ptr & 1) ) return 0;
// Combine the frame address with the in-page offset
472 return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
476 * \brief Sets the flags on a page
/**
 * \brief Alter the MM_PFLAG_* flags on a mapped page.
 * \param VAddr Virtual address of the page
 * \param Flags New flag values (only bits selected by \a Mask apply)
 * \param Mask  Which MM_PFLAG_* bits to change
 * (The set/clear bodies for each flag are elided in this view.)
 */
478 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
// Locate the PTE; silently ignore unmapped addresses
484 rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
487 // Ensure the entry is valid
488 if( !(*ent & 1) ) return ;
// Read-only <-> writable
491 if( Mask & MM_PFLAG_RO )
493 if( Flags & MM_PFLAG_RO ) {
// Kernel-only <-> user-accessible
502 if( Mask & MM_PFLAG_KERNEL )
504 if( Flags & MM_PFLAG_KERNEL ) {
// Copy-on-write marking
513 if( Mask & MM_PFLAG_COW )
515 if( Flags & MM_PFLAG_COW ) {
// Executable <-> no-execute (NX bit)
526 if( Mask & MM_PFLAG_EXEC )
528 if( Flags & MM_PFLAG_EXEC ) {
538 * \brief Get the flags applied to a page
/**
 * \brief Read the MM_PFLAG_* flags of a mapped page.
 * \param VAddr Virtual address of the page
 * \return Bitwise OR of MM_PFLAG_* values; 0 if unmapped
 */
540 Uint MM_GetFlags(tVAddr VAddr)
545 rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
548 if( !(*ent & 1) ) return 0;
// Translate hardware PTE bits into the portable MM_PFLAG_* set.
// Note RO/KERNEL/EXEC are the *inverse* of the hardware bits.
551 if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
553 if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
555 if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
557 if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
562 // --- Hardware Mappings ---
564 * \brief Map a range of hardware pages
/**
 * \brief Map a contiguous range of physical (hardware) pages into the
 *        kernel's hardware-mapping window.
 * \param PAddr  First physical page to map
 * \param Number Number of pages
 * \return Virtual address of the mapping, or panic on exhaustion
 */
566 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
571 //TODO: Add speedups (memory of first possible free)
// First-fit scan of the HW-mapping window for `Number` free slots
572 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
// Probe forward; stop early if a slot is already in use
574 for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
576 if( MM_GetPhysAddr(ret) != 0 ) break;
// num only underflows below 0 when all `Number` probes were free;
// num >= 0 means the inner loop broke early, so keep scanning
578 if( num >= 0 ) continue;
// NOTE(review): PAddr is advanced past the range here; the elided
// lines presumably map pages backwards from the end — confirm.
580 PAddr += 0x1000 * Number;
// Exhausted the window: unimplemented fallback
592 Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
597 * \brief Free a range of hardware pages
/**
 * \brief Unmap a range previously mapped with MM_MapHWPages.
 * \param VAddr  First virtual page of the mapping
 * \param Number Number of pages
 * (Body elided in this view.)
 */
599 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
601 // Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
611 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
612 * \brief Allocates DMA physical memory
613 * \param Pages Number of pages required
614 * \param MaxBits Maximum number of bits the physical address can have
615 * \param PhysAddr Pointer to the location to place the physical address allocated
616 * \return Virtual address allocate
/**
 * \brief Allocate physically-contiguous memory suitable for DMA.
 * \param Pages    Number of pages required
 * \param MaxBits  Maximum number of bits the physical address may use
 * \param PhysAddr Out: physical address of the allocation
 * \return Virtual address of the mapping, or 0 on failure
 */
618 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
// Sanity: sub-page address widths and a NULL out-pointer are errors
624 if(MaxBits < 12 || !PhysAddr) return 0;
// Fast path: one page with no address-width restriction can come
// straight from the general allocator
627 if(Pages == 1 && MaxBits >= PHYS_BITS)
629 phys = MM_AllocPhys();
631 ret = MM_MapHWPages(phys, 1);
// Slow path: request a contiguous, address-limited physical range
640 phys = MM_AllocPhysRange(Pages, MaxBits);
641 // - Was it allocated?
642 if(phys == 0) return 0;
644 // Allocated successfully, now map
645 ret = MM_MapHWPages(phys, Pages);
647 // If it didn't map, free then return 0
648 for(;Pages--;phys+=0x1000)
657 // --- Tempory Mappings ---
/**
 * \brief Map a physical page into a free temporary-mapping slot.
 * \param PAddr Physical frame to map
 * \return Virtual address of the slot (failure path elided in this view)
 */
658 tVAddr MM_MapTemp(tPAddr PAddr)
// Fixed pool of slots between MM_TMPMAP_BASE and MM_TMPMAP_END
660 const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
661 tVAddr ret = MM_TMPMAP_BASE;
// Linear scan for the first unused slot
664 for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
667 if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
/**
 * \brief Release a temporary mapping created by MM_MapTemp.
 * \param VAddr Slot address returned by MM_MapTemp
 */
680 void MM_FreeTemp(tVAddr VAddr)
682 MM_Deallocate(VAddr);
687 // --- Address Space Clone --
/**
 * \brief Clone the current address space (fork support).
 * User pages are shared copy-on-write; kernel mappings are shared
 * directly (except per-process regions); the kernel stack is deep-copied.
 * \return Physical address of the new PML4, suitable for CR3
 */
688 tPAddr MM_Clone(void)
694 // #1 Create a copy of the PML4
695 ret = MM_AllocPhys();
698 // #2 Alter the fractal pointer
// The temporary fractal window is pointed at the new PML4 so its
// tables can be edited below; the mutex serialises window users.
699 Mutex_Acquire(&glMM_TempFractalLock);
703 // #3 Set Copy-On-Write to all user pages
// PML4 entries 0..255 cover the user half of the address space.
704 for( i = 0; i < 256; i ++)
706 TMPMAPLVL4(i) = PAGEMAPLVL4(i);
707 // Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
708 if( TMPMAPLVL4(i) & 1 )
// Share the subtree: bump its refcount, mark COW, drop write.
// NOTE(review): the parent's own entry presumably gets the same
// COW treatment on an elided line — confirm against full source.
710 MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
711 TMPMAPLVL4(i) |= PF_COW;
712 TMPMAPLVL4(i) &= ~PF_WRITE;
716 // #4 Map in kernel pages
717 for( i = 256; i < 512; i ++ )
// Skip per-process PML4 slots. NOTE(review): the comments' indices
// (509/510) are one higher than the code's (508/509); the code
// matches the fractal slot set at 508 below — comments look stale.
720 // 320 0xFFFFA.... - Kernel Stacks
721 if( i == 320 ) continue;
722 // 509 0xFFFFFE0.. - Fractal mapping
723 if( i == 508 ) continue;
724 // 510 0xFFFFFE8.. - Temp fractal mapping
725 if( i == 509 ) continue;
727 TMPMAPLVL4(i) = PAGEMAPLVL4(i);
728 if( TMPMAPLVL4(i) & 1 )
729 MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
732 // #5 Set fractal mapping
// The new space's slot 508 points at its own PML4 (recursive map)
733 TMPMAPLVL4(508) = ret | 3;
734 TMPMAPLVL4(509) = 0; // Temp
736 // #6 Create kernel stack
737 // tThread->KernelStack is the top
738 // There is 1 guard page below the stack
739 kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
741 // Log("MM_Clone: kstackbase = %p", kstackbase);
// Give the child a fresh kernel-stack region; i starts at 1 so the
// lowest page stays unmapped as the guard page.
743 TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
744 for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
746 tPAddr phys = MM_AllocPhys();
748 MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
750 Log_Debug("MM", "MM_Clone: Cloning stack page %p from %P to %P",
751 kstackbase+i*0x1000, MM_GetPhysAddr( kstackbase+i*0x1000 ), phys
// Copy the live stack page if it exists, else zero-fill
753 tmpmapping = MM_MapTemp(phys);
754 if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
755 memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
757 memset((void*)tmpmapping, 0, 0x1000);
759 // Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
760 MM_FreeTemp(tmpmapping);
768 Mutex_Release(&glMM_TempFractalLock);
769 // Log("MM_Clone: RETURN %P", ret);
/**
 * \brief Tear down all user-half mappings in the current address space
 *        (used on exec / process exit).
 * Walks PML4 entries 0..255 depth-first; COW-shared subtrees are skipped
 * whole (only the reference is dropped on elided lines — confirm), while
 * owned subtrees are freed page-by-page, then table-by-table bottom-up.
 * `addr` tracks the virtual address corresponding to the current indices.
 */
773 void MM_ClearUser(void)
776 int pml4, pdpt, pd, pt;
// User half = PML4 slots 0..255
778 for( pml4 = 0; pml4 < 256; pml4 ++ )
780 // Catch an un-allocated PML4 entry
781 if( !(PAGEMAPLVL4(pml4) & 1) ) {
782 addr += 1ULL << PML4_SHIFT;
// COW-shared subtree: don't descend into it
787 if( (PAGEMAPLVL4(pml4) & PF_COW) ) {
788 addr += 1ULL << PML4_SHIFT;
// Descend into the PDPT (512 entries per table)
795 for( pdpt = 0; pdpt < 512; pdpt ++ )
798 if( !(PAGEDIRPTR(addr >> PDP_SHIFT) & 1) ) {
799 addr += 1ULL << PDP_SHIFT;
804 if( (PAGEDIRPTR(addr >> PDP_SHIFT) & PF_COW) ) {
805 addr += 1ULL << PDP_SHIFT;
// Descend into the page directory
809 for( pd = 0; pd < 512; pd ++ )
811 // Unallocated PDir entry
812 if( !(PAGEDIR(addr >> PDIR_SHIFT) & 1) ) {
813 addr += 1ULL << PDIR_SHIFT;
818 if( PAGEDIR(addr >> PDIR_SHIFT) & PF_COW ) {
819 addr += 1ULL << PDIR_SHIFT;
823 // TODO: Catch large pages
// Free every present page in the final-level table
826 for( pt = 0; pt < 512; pt ++ )
829 if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
830 MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
831 PAGETABLE(addr >> PTAB_SHIFT) = 0;
// Free the now-empty page table itself
837 MM_DerefPhys( PAGEDIR(addr >> PDIR_SHIFT) & PADDR_MASK );
838 PAGEDIR(addr >> PDIR_SHIFT) = 0;
841 // Free page directory
842 MM_DerefPhys( PAGEDIRPTR(addr >> PDP_SHIFT) & PADDR_MASK );
843 PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
846 // Free page directory pointer table (PML4 entry)
847 MM_DerefPhys( PAGEMAPLVL4(pml4) & PADDR_MASK );
848 PAGEMAPLVL4(pml4) = 0;
/**
 * \brief Create a kernel stack for a worker thread inside the initial
 *        (PID 0) address space, optionally seeding its top with data.
 * \param StackData Initial data for the top of the stack (may be elided)
 * \param StackSize Size of StackData; must be <= one page
 * \return Virtual address of the stack top
 */
852 tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
857 // #1 Set temp fractal to PID0
// Point the temporary fractal window at the initial PML4 so the
// allocations below land in the shared worker address space.
858 Mutex_Acquire(&glMM_TempFractalLock);
859 TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
861 // #2 Scan for a free stack addresss < 2^47
// A slot is free if its tables don't exist (rv==0 path) or the PTE
// is not present.
862 for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
865 if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) == 0 ) break;
866 if( !(*ptr & 1) ) break;
// No slot found: release the window and fail (return elided)
868 if( ret >= (1ULL << 47) ) {
869 Mutex_Release(&glMM_TempFractalLock);
873 // #3 Map all save the last page in the range
874 // - This acts as as guard page, and doesn't cost us anything.
875 for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
877 tPAddr phys = MM_AllocPhys();
880 Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
883 MM_MapEx(ret + i*0x1000, phys, 1, 0);
// Seed data is limited to a single page ("cbf" = can't be bothered)
886 if( StackSize > 0x1000 ) {
887 Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, cbf handling", StackSize);
// Copy StackData to the END of the topmost mapped page via a
// temporary mapping (the stack grows downwards from ret + i*0x1000).
892 MM_GetPageEntryPtr(ret + i*0x1000, 1, 0, 0, &ptr);
893 paddr = *ptr & ~0xFFF;
894 tmp_addr = MM_MapTemp(paddr);
895 memcpy( (void*)(tmp_addr + (0x1000 - StackSize)), StackData, StackSize );
896 MM_FreeTemp(tmp_addr);
899 Mutex_Release(&glMM_TempFractalLock);
// Return the stack TOP (one past the last mapped page)
901 return ret + i*0x1000;
905 * \brief Allocate a new kernel stack
907 tVAddr MM_NewKStack(void)
909 tVAddr base = MM_KSTACK_BASE;
911 for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
913 if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
916 //Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
917 for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
919 if( !MM_Allocate(base+i) )
921 Log_Warning("MM", "MM_NewKStack - Allocation failed");
922 for( i -= 0x1000; i; i -= 0x1000)
923 MM_Deallocate(base+i);
928 return base + KERNEL_STACK_SIZE;
930 Log_Warning("MM", "MM_NewKStack - No address space left\n");