 * Virtual Memory Manager
#include <threads_int.h>
#define PHYS_BITS 52 // TODO: Move out
#define PADDR_MASK 0x7FFFFFFF##FFFFF000
#define PAGE_MASK ((1LL << 36)-1)
#define TABLE_MASK ((1LL << 27)-1)
#define PDP_MASK ((1LL << 18)-1)
#define PML4_MASK ((1LL << 9)-1)
#define PF_PRESENT 0x001
#define PF_WRITE 0x002
#define PF_LARGE 0x080
#define PF_GLOBAL 0x100
#define PF_PAGED 0x400
#define PF_NX 0x80000000##00000000
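// Note (assumption, for orientation only): PF_PAGED (bit 10) sits in the
// CPU-ignored "available" bits (9-11) of a page-table entry, and the PF_COW
// flag tested by the fault handler below is assumed to live there as well,
// which is why both are free for kernel bookkeeping alongside the
// hardware-defined Present/Write/Large/Global/NX bits above.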
#define PAGETABLE(idx) (*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
#define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
#define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
#define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
#define TMPCR3() PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
#define TMPTABLE(idx) (*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
#define TMPDIR(idx) PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
#define TMPDIRPTR(idx) PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
#define TMPMAPLVL4(idx) PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
#define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr))
#define INVLPG_ALL() __asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;")
#define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4")
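/*
 * Illustrative sketch (not part of the original source): because the paging
 * structures are fractally mapped, a full 4 KiB translation can be read
 * directly through the macros above, e.g.
 *
 *	if( (PAGEMAPLVL4(addr>>39) & PF_PRESENT)
 *	 && (PAGEDIRPTR (addr>>30) & PF_PRESENT)
 *	 && (PAGEDIR    (addr>>21) & PF_PRESENT)
 *	 && (PAGETABLE  (addr>>12) & PF_PRESENT) )
 *		phys = (PAGETABLE(addr>>12) & PADDR_MASK) | (addr & 0xFFF);
 *
 * MM_PageFault() below performs exactly this walk; MM_GetPhysAddr() does the
 * equivalent via MM_GetPageEntryPtr().
 */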
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
extern void Error_Backtrace(Uint IP, Uint BP);
extern tPAddr gInitialPML4[512];
extern void Threads_SegFault(tVAddr Addr);
extern char _UsertextBase[];
void MM_InitVirt(void);
//void MM_FinishVirtualInit(void);
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable );
int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_ClearUser(void);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
tMutex glMM_TempFractalLock;
void MM_InitVirt(void)
Log_Debug("MMVirt", "&PAGEMAPLVL4(0) = %p", &PAGEMAPLVL4(0));
// MM_DumpTables(0, -1L);
void MM_FinishVirtualInit(void)
 * \brief Clone a page from an entry
 * \param Ent Pointer to the entry in the PML4/PDP/PD/PT
 * \param NextLevel Pointer to contents of the entry
 * \param Addr Destination address
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable )
tPAddr curpage = *Ent & PADDR_MASK;
if( MM_GetRefCount( curpage ) <= 0 ) {
Log_KernelPanic("MMVirt", "Page %P still marked COW, but unreferenced", curpage);
// Log_Debug("MM_Virt", "%P refcount %i", curpage, MM_GetRefCount( curpage ));
if( MM_GetRefCount( curpage ) == 1 )
*Ent |= PF_PRESENT|PF_WRITE;
// Log_Debug("MMVirt", "COW ent at %p (%p), last (%P)", Ent, NextLevel, curpage);
if( !(paddr = MM_AllocPhys()) ) {
Threads_SegFault(Addr);
ASSERT(paddr != curpage);
tmp = (void*)MM_MapTemp(paddr);
memcpy( tmp, NextLevel, 0x1000 );
MM_FreeTemp( (tVAddr)tmp );
// Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
MM_DerefPhys( curpage );
*Ent |= paddr|PF_PRESENT|PF_WRITE;
INVLPG( (tVAddr)NextLevel );
Uint64 *dp = NextLevel;
for( i = 0; i < 512; i ++ )
if( !(dp[i] & PF_PRESENT) ) continue;
MM_RefPhys( dp[i] & PADDR_MASK );
if( dp[i] & PF_WRITE ) {
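/*
 * Usage sketch (assumption, for illustration only): a page is typically put
 * into the copy-on-write state by dropping write access and setting the COW
 * flag, e.g. through the flag interface further down:
 *
 *	MM_SetFlags(addr, MM_PFLAG_COW, MM_PFLAG_COW);
 *
 * The next write to `addr` then faults, and MM_PageFault() below resolves it
 * through MM_int_ClonePageEnt(): the sole owner of the frame simply regains
 * PF_WRITE, while a shared frame is copied into a fresh allocation first.
 */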
 * \brief Called on a page fault
int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
// Copy-on-Write handling
if( PAGEMAPLVL4(Addr>>39) & PF_PRESENT
&& PAGEDIRPTR (Addr>>30) & PF_PRESENT
&& PAGEDIR (Addr>>21) & PF_PRESENT
&& PAGETABLE (Addr>>12) & PF_PRESENT )
if( PAGEMAPLVL4(Addr>>39) & PF_COW )
tPAddr *dp = &PAGEDIRPTR((Addr>>39)*512);
MM_int_ClonePageEnt( &PAGEMAPLVL4(Addr>>39), dp, Addr, 1 );
// MM_DumpTables(Addr>>39 << 39, (((Addr>>39) + 1) << 39) - 1);
if( PAGEDIRPTR(Addr>>30) & PF_COW )
tPAddr *dp = &PAGEDIR( (Addr>>30)*512 );
MM_int_ClonePageEnt( &PAGEDIRPTR(Addr>>30), dp, Addr, 1 );
// MM_DumpTables(Addr>>30 << 30, (((Addr>>30) + 1) << 30) - 1);
if( PAGEDIR(Addr>>21) & PF_COW )
tPAddr *dp = &PAGETABLE( (Addr>>21)*512 );
MM_int_ClonePageEnt( &PAGEDIR(Addr>>21), dp, Addr, 1 );
// MM_DumpTables(Addr>>21 << 21, (((Addr>>21) + 1) << 21) - 1);
if( PAGETABLE(Addr>>12) & PF_COW )
MM_int_ClonePageEnt( &PAGETABLE(Addr>>12), (void*)(Addr & ~0xFFF), Addr, 0 );
INVLPG( Addr & ~0xFFF );
// If it was a user fault, tell the thread handler
Warning("User %s %s memory%s",
(ErrorCode&2?"write to":"read from"),
(ErrorCode&1?"bad/locked":"non-present"),
(ErrorCode&16?" (Instruction Fetch)":"")
Warning("User Pagefault: Instruction at %04x:%p accessed %p",
Regs->CS, Regs->RIP, Addr);
__asm__ __volatile__ ("sti"); // Restart IRQs
Threads_SegFault(Addr);
// -- Check Error Code --
Warning("Reserved Bits Trashed!");
Warning("Kernel %s %s memory%s",
(ErrorCode&2?"write to":"read from"),
(ErrorCode&1?"bad/locked":"non-present"),
(ErrorCode&16?" (Instruction Fetch)":"")
Log("Code at %p accessed %p", Regs->RIP, Addr);
// Print Stack Backtrace
Error_Backtrace(Regs->RIP, Regs->RBP);
MM_DumpTables(0, -1);
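/*
 * For reference: the ErrorCode bits tested above follow the x86 page-fault
 * error code layout -
 *	0x01 = protection violation (clear means the page was not present)
 *	0x02 = write access
 *	0x04 = fault raised from user mode
 *	0x08 = reserved bit set in a paging structure
 *	0x10 = instruction fetch
 */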
 * \brief Dumps the layout of the page tables
void MM_DumpTables(tVAddr Start, tVAddr End)
#define CANONICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
const tPAddr CHANGEABLE_BITS = ~(PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED) & 0xFFF;
const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
tVAddr rangeStart = 0;
tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a valid value
Log("Table Entries: (%p to %p)", Start, End);
End &= (1L << 48) - 1;
Start >>= 12; End >>= 12;
for(page = Start, curPos = Start<<12;
curPos += 0x1000, page++)
if( curPos == 0x800000000000L )
curPos = 0xFFFF800000000000L;
//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
//Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
//Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
|| !(PAGEDIRPTR(page>>18) & PF_PRESENT)
|| !(PAGEDIR(page>>9) & PF_PRESENT)
|| !(PAGETABLE(page) & PF_PRESENT)
|| (PAGETABLE(page) & MASK) != expected)
if(expected != CHANGEABLE_BITS)
Log("%016llx => %13llx : 0x%6llx (%c%c%c%c)",
CANONICAL(rangeStart),
PAGETABLE(rangeStart>>12) & PADDR_MASK,
(expected & PF_PAGED ? 'p' : '-'),
(expected & PF_COW ? 'C' : '-'),
(expected & PF_USER ? 'U' : '-'),
(expected & PF_WRITE ? 'W' : '-')
expected = CHANGEABLE_BITS;
if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
page += (1 << 27) - 1;
curPos += (1L << 39) - 0x1000;
if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
page += (1 << 18) - 1;
curPos += (1L << 30) - 0x1000;
if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
page += (1 << 9) - 1;
curPos += (1L << 21) - 0x1000;
if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
expected = (PAGETABLE(page) & MASK);
if(expected != CHANGEABLE_BITS)
if(expected != CHANGEABLE_BITS) {
Log("%016llx => %13llx : 0x%6llx (%c%c%c%c)",
CANONICAL(rangeStart),
PAGETABLE(rangeStart>>12) & PADDR_MASK,
(expected & PF_PAGED ? 'p' : '-'),
(expected & PF_COW ? 'C' : '-'),
(expected & PF_USER ? 'U' : '-'),
(expected & PF_WRITE ? 'W' : '-')
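/*
 * Usage sketch (illustrative): dump just the lower (user) half of the
 * address space:
 *
 *	MM_DumpTables(0, 0x800000000000UL - 1);
 *
 * The page fault handler above calls MM_DumpTables(0, -1) to dump everything.
 */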
 * \brief Get a pointer to a page entry
 * \param Addr Virtual Address
 * \param bTemp Use the Temporary fractal mapping
 * \param bAllocate Allocate entries
 * \param bLargePage Request a large page
 * \param Pointer Location to place the calculated pointer
 * \return Page size, or negative on error
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
const int ADDR_SIZES[] = {39, 30, 21, 12};
const int nADDR_SIZES = sizeof(ADDR_SIZES)/sizeof(ADDR_SIZES[0]);
#define BITMASK(bits) ( (1LL << (bits))-1 )
pmlevels[3] = &TMPTABLE(0); // Page Table
pmlevels[2] = &TMPDIR(0); // PDIR
pmlevels[1] = &TMPDIRPTR(0); // PDPT
pmlevels[0] = &TMPMAPLVL4(0); // PML4
pmlevels[3] = (void*)MM_FRACTAL_BASE; // Page Table
pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&BITMASK(VIRT_BITS-12)]; // PDIR
pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&BITMASK(VIRT_BITS-21)]; // PDPT
pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&BITMASK(VIRT_BITS-30)]; // PML4
Addr &= (1ULL << 48)-1;
for( i = 0; i < nADDR_SIZES-1; i ++ )
// INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &
// Check for a large page
if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) == 0 && bLargePage )
if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
return ADDR_SIZES[i];
// Allocate an entry if required
if( !(pmlevels[i][Addr >> ADDR_SIZES[i]] & 1) )
if( !bAllocate ) return -4; // If allocation is not requested, error
if( !(tmp = MM_AllocPhys()) ) return -2;
pmlevels[i][Addr >> ADDR_SIZES[i]] = tmp | 3;
if( Addr < 0x800000000000 )
pmlevels[i][Addr >> ADDR_SIZES[i]] |= PF_USER;
INVLPG( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ] );
memset( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ], 0, 0x1000 );
LOG("Init PML%i ent 0x%x %p with %P", 4 - i,
(Addr>>ADDR_SIZES[i])<<ADDR_SIZES[i], tmp);
else if( pmlevels[i][Addr >> ADDR_SIZES[i]] & PF_LARGE )
if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) != 0 ) return -3;
if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
return ADDR_SIZES[i]; // Large page warning
// And, set the page table entry
if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
return ADDR_SIZES[i];
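/*
 * Usage sketch (illustrative): look up the PTE for an address without
 * allocating missing tables, then test the present bit:
 *
 *	tPAddr	*ent;
 *	if( MM_GetPageEntryPtr(addr, 0, 0, 0, &ent) >= 0 && (*ent & PF_PRESENT) )
 *		phys = *ent & PADDR_MASK;
 *
 * MM_GetPhysAddr() and MM_GetFlags() below follow this pattern.
 */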
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
 * \param bTemp Use temporary mappings
 * \param bLarge Treat as a large page
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
ENTER("xVAddr xPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
if( *ent & 1 ) LEAVE_RET('i', 0);
if( VAddr < 0x800000000000 )
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
int MM_Map(tVAddr VAddr, tPAddr PAddr)
return MM_MapEx(VAddr, PAddr, 0, 0);
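/*
 * Usage sketch (illustrative): the usual allocate-and-map sequence, as used
 * by MM_Allocate() below:
 *
 *	tPAddr phys = MM_AllocPhys();
 *	if( phys && !MM_Map(vaddr, phys) )
 *		MM_DerefPhys(phys);	// assumed cleanup if the mapping fails
 */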
 * \brief Remove a mapped page
void MM_Unmap(tVAddr VAddr)
if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
 * \brief Allocate a block of memory at the specified virtual address
tPAddr MM_Allocate(tVAddr VAddr)
ENTER("xVAddr", VAddr);
// Ensure the tables are allocated before the page (keeps things neat)
MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
ret = MM_AllocPhys();
LOG("ret = %x", ret);
if(!ret) LEAVE_RET('i', 0);
if( !MM_Map(VAddr, ret) )
Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
 * \brief Deallocate a page at a virtual address
void MM_Deallocate(tVAddr VAddr)
phys = MM_GetPhysAddr(VAddr);
 * \brief Get the page table entry of a virtual address
 * \param Addr Virtual Address
 * \param Phys Location to put the physical address
 * \param Flags Flags on the entry (set to zero if unmapped)
 * \return Size of the entry (in address bits) - 12 = 4KiB page
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
if(!Phys || !Flags) return 0;
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
*Phys = *ptr & PADDR_MASK;
*Flags = *ptr & 0xFFF;
 * \brief Get the physical address of a virtual location
tPAddr MM_GetPhysAddr(tVAddr Addr)
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
if( !(*ptr & 1) ) return 0;
return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
 * \brief Sets the flags on a page
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
// Ensure the entry is valid
if( !(*ent & 1) ) return ;
if( Mask & MM_PFLAG_RO )
if( Flags & MM_PFLAG_RO ) {
if( Mask & MM_PFLAG_KERNEL )
if( Flags & MM_PFLAG_KERNEL ) {
if( Mask & MM_PFLAG_COW )
if( Flags & MM_PFLAG_COW ) {
if( Mask & MM_PFLAG_EXEC )
if( Flags & MM_PFLAG_EXEC ) {
 * \brief Get the flags applied to a page
Uint MM_GetFlags(tVAddr VAddr)
rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
if( !(*ent & 1) ) return 0;
if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
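/*
 * Usage sketch (illustrative): make a page read-only and verify the change
 * through the flag interface above:
 *
 *	MM_SetFlags(vaddr, MM_PFLAG_RO, MM_PFLAG_RO);
 *	ASSERT( MM_GetFlags(vaddr) & MM_PFLAG_RO );
 */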
// --- Hardware Mappings ---
 * \brief Map a range of hardware pages
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
// TODO: Add speedups (remember the first possible free address)
for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
if( MM_GetPhysAddr(ret) != 0 ) break;
if( num >= 0 ) continue;
PAddr += 0x1000 * Number;
Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
 * \brief Free a range of hardware pages
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
// Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
MM_DerefPhys( MM_GetPhysAddr(VAddr) );
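/*
 * Usage sketch (illustrative; `mmio_phys` is a hypothetical device address):
 * map a two-page MMIO region for a driver, then release it again:
 *
 *	tVAddr regs = MM_MapHWPages(mmio_phys, 2);
 *	// ... access the device registers ...
 *	MM_UnmapHWPages(regs, 2);
 */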
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages Number of pages required
 * \param MaxBits Maximum number of bits the physical address can have
 * \param PhysAddr Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
if(MaxBits < 12 || !PhysAddr) return 0;
if(Pages == 1 && MaxBits >= PHYS_BITS)
phys = MM_AllocPhys();
ret = MM_MapHWPages(phys, 1);
phys = MM_AllocPhysRange(Pages, MaxBits);
// - Was it allocated?
if(phys == 0) return 0;
// Allocated successfully, now map
ret = MM_MapHWPages(phys, Pages);
// MapHWPages references the pages, so deref them back down to 1
for(;Pages--;phys+=0x1000)
// If it didn't map, free then return 0
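/*
 * Usage sketch (illustrative): allocate a 16 KiB DMA buffer for a device
 * that can only address 32 bits of physical memory:
 *
 *	tPAddr	dma_phys;
 *	tVAddr	buf = MM_AllocDMA(4, 32, &dma_phys);
 *	if( !buf )	// out of suitably-placed memory
 *		return 0;
 */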
// --- Temporary Mappings ---
tVAddr MM_MapTemp(tPAddr PAddr)
const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
tVAddr ret = MM_TMPMAP_BASE;
for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
void MM_FreeTemp(tVAddr VAddr)
MM_Deallocate(VAddr);
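/*
 * Usage sketch (illustrative): write to an arbitrary physical frame through a
 * temporary mapping, the same pattern MM_Clone() uses for kernel stack pages:
 *
 *	tVAddr tmp = MM_MapTemp(phys);
 *	memcpy( (void*)tmp, src, 0x1000 );
 *	MM_FreeTemp(tmp);
 */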
// --- Address Space Clone ---
tPAddr MM_Clone(void)
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
// #2 Alter the fractal pointer
Mutex_Acquire(&glMM_TempFractalLock);
// #3 Set Copy-On-Write to all user pages
for( i = 0; i < 256; i ++)
TMPMAPLVL4(i) = PAGEMAPLVL4(i);
// Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
if( !(TMPMAPLVL4(i) & PF_PRESENT) ) continue ;
MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
if( TMPMAPLVL4(i) & PF_WRITE ) {
TMPMAPLVL4(i) |= PF_COW;
TMPMAPLVL4(i) &= ~PF_WRITE;
// #4 Map in kernel pages
for( i = 256; i < 512; i ++ )
// 320 0xFFFFA.... - Kernel Stacks
if( i == 320 ) continue;
// 508 0xFFFFFE0.. - Fractal mapping
if( i == 508 ) continue;
// 509 0xFFFFFE8.. - Temp fractal mapping
if( i == 509 ) continue;
TMPMAPLVL4(i) = PAGEMAPLVL4(i);
if( TMPMAPLVL4(i) & 1 )
MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
// #5 Set fractal mapping
TMPMAPLVL4(508) = ret | 3;
TMPMAPLVL4(509) = 0; // Temp
// #6 Create kernel stack
// tThread->KernelStack is the top
// There is 1 guard page below the stack
kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
Log("MM_Clone: kstackbase = %p", kstackbase);
TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
tPAddr phys = MM_AllocPhys();
MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
Log_Debug("MM", "MM_Clone: Cloning stack page %p from %P to %P",
kstackbase+i*0x1000, MM_GetPhysAddr( kstackbase+i*0x1000 ), phys
tmpmapping = MM_MapTemp(phys);
if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
memset((void*)tmpmapping, 0, 0x1000);
// Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
MM_FreeTemp(tmpmapping);
Mutex_Release(&glMM_TempFractalLock);
// Log("MM_Clone: RETURN %P", ret);
void MM_ClearUser(void)
int pml4, pdpt, pd, pt;
for( pml4 = 0; pml4 < 256; pml4 ++ )
// Catch an un-allocated PML4 entry
if( !(PAGEMAPLVL4(pml4) & 1) ) {
addr += 1ULL << PML4_SHIFT;
if( (PAGEMAPLVL4(pml4) & PF_COW) ) {
addr += 1ULL << PML4_SHIFT;
for( pdpt = 0; pdpt < 512; pdpt ++ )
if( !(PAGEDIRPTR(addr >> PDP_SHIFT) & 1) ) {
addr += 1ULL << PDP_SHIFT;
if( (PAGEDIRPTR(addr >> PDP_SHIFT) & PF_COW) ) {
addr += 1ULL << PDP_SHIFT;
for( pd = 0; pd < 512; pd ++ )
// Unallocated PDir entry
if( !(PAGEDIR(addr >> PDIR_SHIFT) & 1) ) {
addr += 1ULL << PDIR_SHIFT;
if( PAGEDIR(addr >> PDIR_SHIFT) & PF_COW ) {
addr += 1ULL << PDIR_SHIFT;
// TODO: Catch large pages
for( pt = 0; pt < 512; pt ++ )
if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
PAGETABLE(addr >> PTAB_SHIFT) = 0;
MM_DerefPhys( PAGEDIR(addr >> PDIR_SHIFT) & PADDR_MASK );
PAGEDIR(addr >> PDIR_SHIFT) = 0;
// Free page directory
MM_DerefPhys( PAGEDIRPTR(addr >> PDP_SHIFT) & PADDR_MASK );
PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
// Free page directory pointer table (PML4 entry)
MM_DerefPhys( PAGEMAPLVL4(pml4) & PADDR_MASK );
PAGEMAPLVL4(pml4) = 0;
tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
// #1 Set temp fractal to PID0
Mutex_Acquire(&glMM_TempFractalLock);
TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
// #2 Scan for a free stack address < 2^47
for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) <= 0 ) break;
if( !(*ptr & 1) ) break;
if( ret >= (1ULL << 47) ) {
Mutex_Release(&glMM_TempFractalLock);
// #3 Map all but the last page in the range
// - This acts as a guard page, and doesn't cost us anything.
for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
tPAddr phys = MM_AllocPhys();
Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
MM_MapEx(ret + i*0x1000, phys, 1, 0);
if( StackSize > 0x1000 ) {
Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, cbf handling", StackSize);
MM_GetPageEntryPtr(ret + i*0x1000, 1, 0, 0, &ptr);
paddr = *ptr & ~0xFFF;
tmp_addr = MM_MapTemp(paddr);
memcpy( (void*)(tmp_addr + (0x1000 - StackSize)), StackData, StackSize );
MM_FreeTemp(tmp_addr);
Mutex_Release(&glMM_TempFractalLock);
return ret + i*0x1000;
 * \brief Allocate a new kernel stack
tVAddr MM_NewKStack(void)
tVAddr base = MM_KSTACK_BASE;
for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
if( !MM_Allocate(base+i) )
Log_Warning("MM", "MM_NewKStack - Allocation failed");
for( i -= 0x1000; i; i -= 0x1000)
MM_Deallocate(base+i);
return base + KERNEL_STACK_SIZE;
Log_Warning("MM", "MM_NewKStack - No address space left\n");