/*
 * Virtual Memory Manager
 */
#define DEBUG	0	// Assumed: gates the ENTER/LOG/LEAVE trace macros used below
#include <acess.h>	// Assumed companion header (Log*, Warning, memcpy, Mutex_*)
#include <mm_virt.h>	// Assumed header providing MM_FRACTAL_BASE and friends
#include <threads_int.h>
#include <proc.h>	// Assumed header providing Proc_GetCurThread

#define PHYS_BITS	52	// TODO: Move out
#define VIRT_BITS	48
#define PADDR_MASK	0x7FFFFFFF##FFFFF000
#define PAGE_MASK	((1LL << 36)-1)
#define TABLE_MASK	((1LL << 27)-1)
#define PDP_MASK	((1LL << 18)-1)
#define PML4_MASK	((1LL << 9)-1)

#define PF_PRESENT	0x001
#define PF_WRITE	0x002
#define PF_USER		0x004	// Architectural U/S bit (assumed value; used below)
#define PF_LARGE	0x080
#define PF_GLOBAL	0x100
#define PF_COW		0x200	// Software-defined (AVL) bit (assumed value; used below)
#define PF_PAGED	0x400
#define PF_NX		0x80000000##00000000
#define PAGETABLE(idx)	(*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
#define PAGEDIR(idx)	PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
#define PAGEDIRPTR(idx)	PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
#define PAGEMAPLVL4(idx)	PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
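/*
 * These macros rely on the recursive ("fractal") mapping: one PML4 slot
 * maps the PML4 itself, so each extra pass through that slot during a
 * hardware walk peels one level off the tree.  PAGETABLE(v>>12) is the
 * PTE for virtual address v, PAGEDIR(v>>21) the PDE, and so on.
 * Worked example (MM_FRACTAL_BASE = 0xFFFFFE0000000000, i.e. PML4 slot
 * (0xFFFFFE0000000000 >> 39) & 511 = 508, per the comments in MM_Clone):
 * PAGETABLE(v>>12) dereferences MM_FRACTAL_BASE + (v>>12)*8, whose own
 * translation passes through slot 508 once and therefore lands on the
 * page table that maps v.
 */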
#define TMPCR3()	PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
#define TMPTABLE(idx)	(*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
#define TMPDIR(idx)	PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
#define TMPDIRPTR(idx)	PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
#define TMPMAPLVL4(idx)	PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
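/*
 * The "temp" fractal slot holds the PML4 of *another* address space
 * (TMPCR3() is that slot in the current PML4).  Pointing it at a foreign
 * PML4 makes the foreign tables editable through the TMP* macros; note
 * how TMPDIR/TMPDIRPTR/TMPMAPLVL4 deliberately chain through the primary
 * PAGETABLE/PAGEDIR/PAGEDIRPTR macros to gain the extra level of
 * indirection.  MM_Clone and MM_NewWorkerStack use this under
 * glMM_TempFractalLock.
 */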
#define INVLPG(__addr)	__asm__ __volatile__ ("invlpg (%0)" :: "r"(__addr) : "memory")
#define INVLPG_ALL()	__asm__ __volatile__ ("mov %%cr3, %%rax;\n\tmov %%rax, %%cr3" ::: "rax", "memory")
#define INVLPG_GLOBAL()	__asm__ __volatile__ ("mov %%cr4, %%rax;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4" ::: "rax", "memory")
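// INVLPG drops the TLB entry for a single page.  Reloading CR3 flushes
// all non-global entries, and toggling CR4.PGE (bit 7) twice is the
// standard way to flush global entries as well.  The "rax" clobbers tell
// the compiler the scratch register is destroyed.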
//tPAddr	* const gaPageTable = MM_FRACTAL_BASE;

extern void	Error_Backtrace(Uint IP, Uint BP);
extern tPAddr	gInitialPML4[512];
extern void	Threads_SegFault(tVAddr Addr);
extern char	_UsertextBase[];
void	MM_InitVirt(void);
//void	MM_FinishVirtualInit(void);
void	MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable );
int	MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void	MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
void	MM_DumpTables(tVAddr Start, tVAddr End);
int	MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
int	MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int	MM_Map(tVAddr VAddr, tPAddr PAddr);
void	MM_Unmap(tVAddr VAddr);
void	MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
void	MM_ClearUser(void);
int	MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
tMutex	glMM_TempFractalLock;
void MM_InitVirt(void)
{
//	Log_Debug("MMVirt", "&PAGEMAPLVL4(0) = %p", &PAGEMAPLVL4(0));
//	MM_DumpTables(0, -1L);
}

void MM_FinishVirtualInit(void)
{
}
/**
 * \brief Clone a page from an entry
 * \param Ent Pointer to the entry in the PML4/PDP/PD/PT
 * \param NextLevel Pointer to contents of the entry
 * \param Addr Dest address
 * \param bTable Non-zero if the entry points to a table rather than a data page
 */
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable )
{
	tPAddr	curpage = *Ent & PADDR_MASK;
	if( MM_GetRefCount( curpage ) <= 0 ) {
		Log_KernelPanic("MMVirt", "Page %P still marked COW, but unreferenced", curpage);
	}
	if( MM_GetRefCount( curpage ) == 1 )
	{
		// Sole remaining reference: clear COW and restore write access
		*Ent &= ~PF_COW;
		*Ent |= PF_PRESENT|PF_WRITE;
//		Log_Debug("MMVirt", "COW ent at %p (%p) only %P", Ent, NextLevel, curpage);
	}
	else
	{
		tPAddr	paddr;
		void	*tmp;
		if( !(paddr = MM_AllocPhys()) ) {
			Threads_SegFault(Addr);
			return ;
		}
		ASSERT(paddr != curpage);
		tmp = (void*)MM_MapTemp(paddr);
		memcpy( tmp, NextLevel, 0x1000 );
		MM_FreeTemp( (tVAddr)tmp );
//		Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
		MM_DerefPhys( curpage );
		*Ent &= ~(PADDR_MASK|PF_COW);	// Swap in the new frame, dropping COW
		*Ent |= paddr|PF_PRESENT|PF_WRITE;
	}
	INVLPG( (tVAddr)NextLevel );

	// If the cloned entry maps a table, push COW down one level
	if( bTable )
	{
		Uint64	*dp = NextLevel;
		 int	i;
		for( i = 0; i < 512; i ++ )
		{
			if( !(dp[i] & PF_PRESENT) )	continue;
			MM_RefPhys( dp[i] & PADDR_MASK );
			if( dp[i] & PF_WRITE ) {
				dp[i] &= ~PF_WRITE;
				dp[i] |= PF_COW;
			}
		}
	}
}
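/*
 * Marking a whole table COW (bTable=1) defers the real copying: the 512
 * entries below the cloned table are themselves flipped from writable to
 * COW, so each 4 KiB page is only duplicated when something actually
 * writes to it, one fault at a time.
 */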
/**
 * \brief Called on a page fault
 */
int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
{
	// Copy-on-Write handling
	if( PAGEMAPLVL4(Addr>>39) & PF_PRESENT
	 && PAGEDIRPTR (Addr>>30) & PF_PRESENT
	 && PAGEDIR    (Addr>>21) & PF_PRESENT
	 && PAGETABLE  (Addr>>12) & PF_PRESENT )
	{
		// Work top-down so each level is writable before the next is touched
		if( PAGEMAPLVL4(Addr>>39) & PF_COW )
		{
			tPAddr *dp = &PAGEDIRPTR((Addr>>39)*512);
			MM_int_ClonePageEnt( &PAGEMAPLVL4(Addr>>39), dp, Addr, 1 );
//			MM_DumpTables(Addr>>39 << 39, (((Addr>>39) + 1) << 39) - 1);
		}
		if( PAGEDIRPTR(Addr>>30) & PF_COW )
		{
			tPAddr *dp = &PAGEDIR( (Addr>>30)*512 );
			MM_int_ClonePageEnt( &PAGEDIRPTR(Addr>>30), dp, Addr, 1 );
//			MM_DumpTables(Addr>>30 << 30, (((Addr>>30) + 1) << 30) - 1);
		}
		if( PAGEDIR(Addr>>21) & PF_COW )
		{
			tPAddr *dp = &PAGETABLE( (Addr>>21)*512 );
			MM_int_ClonePageEnt( &PAGEDIR(Addr>>21), dp, Addr, 1 );
//			MM_DumpTables(Addr>>21 << 21, (((Addr>>21) + 1) << 21) - 1);
		}
		if( PAGETABLE(Addr>>12) & PF_COW )
		{
			MM_int_ClonePageEnt( &PAGETABLE(Addr>>12), (void*)(Addr & ~0xFFF), Addr, 0 );
			INVLPG( Addr & ~0xFFF );
			return 0;
		}
	}
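	/*
	 * Example cascade: after MM_Clone, the first write to a user page can
	 * fault with COW set on its PML4E, PDPTE, PDE and PTE at once.  The
	 * four checks above fire in that order, copying one 4 KiB paging
	 * structure each, so a single fault restores a private, writable path
	 * all the way down to the data page.
	 */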
	// If it was a user fault, tell the thread handler
	if( ErrorCode & 4 )	// Bit 2: fault came from user mode
	{
		Warning("User %s %s memory%s",
			(ErrorCode&2?"write to":"read from"),
			(ErrorCode&1?"bad/locked":"non-present"),
			(ErrorCode&16?" (Instruction Fetch)":"")
			);
		Warning("User Pagefault: Instruction at %04x:%p accessed %p",
			Regs->CS, Regs->RIP, Addr);
		__asm__ __volatile__ ("sti");	// Restart IRQs
		Threads_SegFault(Addr);
		return 0;
	}
	// -- Check Error Code --
	if( ErrorCode & 8 )	// Bit 3: reserved bit set in a paging structure
		Warning("Reserved Bits Trashed!");
	else
		Warning("Kernel %s %s memory%s",
			(ErrorCode&2?"write to":"read from"),
			(ErrorCode&1?"bad/locked":"non-present"),
			(ErrorCode&16?" (Instruction Fetch)":"")
			);

	Log("Thread %i - Code at %p accessed %p", Threads_GetTID(), Regs->RIP, Addr);
	// Print Stack Backtrace
	Error_Backtrace(Regs->RIP, Regs->RBP);

	MM_DumpTables(0, -1);

	return 1;
}
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected)
{
	#define CANONICAL(addr)	((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
	LogF("%6llx %6llx %6llx %016llx => ",
		MM_GetPhysAddr( (tVAddr)&PAGEDIRPTR(RangeStart>>30) ),
		MM_GetPhysAddr( (tVAddr)&PAGEDIR(RangeStart>>21) ),
		MM_GetPhysAddr( (tVAddr)&PAGETABLE(RangeStart>>12) ),
		CANONICAL(RangeStart)
		);
	if( gMM_ZeroPage && (PAGETABLE(RangeStart>>12) & PADDR_MASK) == gMM_ZeroPage )
		LogF("%13s", "zero" );
	else
		LogF("%13llx", PAGETABLE(RangeStart>>12) & PADDR_MASK );
	LogF(" : 0x%6llx (%c%c%c%c)\r\n",
		Length,
		(Expected & PF_PAGED ? 'p' : '-'),
		(Expected & PF_COW ? 'C' : '-'),
		(Expected & PF_USER ? 'U' : '-'),
		(Expected & PF_WRITE ? 'W' : '-')
		);
	#undef CANONICAL
}
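/* CANONICAL() mirrors the hardware rule that bits 63:48 must copy bit 47:
 * e.g. 0x800000000000 sign-extends to 0xFFFF800000000000. */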
/**
 * \brief Dumps the layout of the page tables
 */
void MM_DumpTables(tVAddr Start, tVAddr End)
{
	const tPAddr	CHANGEABLE_BITS = ~(PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED) & 0xFFF;
	const tPAddr	MASK = ~CHANGEABLE_BITS;	// Physical address and access bits
	tVAddr	rangeStart = 0;
	tPAddr	expected = CHANGEABLE_BITS;	// CHANGEABLE_BITS is used because it's not a valid entry value
	tVAddr	curPos;
	Uint	page;

	Log("Table Entries: (%p to %p)", Start, End);

	End &= (1L << 48) - 1;
	Start >>= 12;	End >>= 12;

	for(page = Start, curPos = Start<<12;
		page < End;
		curPos += 0x1000, page++)
	{
		//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
		//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
		//Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
		//Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
		// End of a contiguous range?
		if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
		|| !(PAGEDIRPTR(page>>18) & PF_PRESENT)
		|| !(PAGEDIR(page>>9) & PF_PRESENT)
		|| !(PAGETABLE(page) & PF_PRESENT)
		|| (PAGETABLE(page) & MASK) != expected)
		{
			if(expected != CHANGEABLE_BITS)
			{
				MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
				expected = CHANGEABLE_BITS;
			}

			// Skip the non-canonical hole
			if( curPos == 0x800000000000L )
				curPos = 0xFFFF800000000000L;

			if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
				page += (1 << 27) - 1;
				curPos += (1L << 39) - 0x1000;
				continue;
			}
			if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
				page += (1 << 18) - 1;
				curPos += (1L << 30) - 0x1000;
				continue;
			}
			if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
				page += (1 << 9) - 1;
				curPos += (1L << 21) - 0x1000;
				continue;
			}
			if( !(PAGETABLE(page) & PF_PRESENT) )	continue;

			expected = (PAGETABLE(page) & MASK);
			rangeStart = curPos;
		}
		if(gMM_ZeroPage && (expected & PADDR_MASK) == gMM_ZeroPage )
			;	// Many pages may share the zero page; don't advance
		else if(expected != CHANGEABLE_BITS)
			expected += 0x1000;	// Expect the next physical page (contiguous range)
	}

	if(expected != CHANGEABLE_BITS) {
		MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
	}
}
/**
 * \brief Get a pointer to a page entry
 * \param Addr Virtual Address
 * \param bTemp Use the Temporary fractal mapping
 * \param bAllocate Allocate entries
 * \param bLargePage Request a large page
 * \param Pointer Location to place the calculated pointer
 * \return Page size in address bits (12 = 4KiB page), or negative on error
 */
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
{
	Uint64	*pmlevels[4];
	tPAddr	tmp;
	 int	size, i;
	#define BITMASK(bits)	( (1LL << (bits))-1 )

	if( bTemp )
	{
		pmlevels[3] = &TMPTABLE(0);	// Page Table
		pmlevels[2] = &TMPDIR(0);	// PDIR
		pmlevels[1] = &TMPDIRPTR(0);	// PDPT
		pmlevels[0] = &TMPMAPLVL4(0);	// PML4
	}
	else
	{
		pmlevels[3] = (void*)MM_FRACTAL_BASE;	// Page Table
		pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&BITMASK(VIRT_BITS-12)];	// PDIR
		pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&BITMASK(VIRT_BITS-21)];	// PDPT
		pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&BITMASK(VIRT_BITS-30)];	// PML4
	}
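	/*
	 * pmlevels[] is the same fractal arithmetic as the PAGETABLE/PAGEDIR
	 * macros, just in array form: indexing the PT window by the fractal
	 * base's own page number yields the PD window, and so on up the tree.
	 * pmlevels[i][Addr >> size] is then the entry for Addr at level i.
	 */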
	// Mask the address down to the 48 translated bits
	Addr &= (1ULL << 48)-1;

	for( size = 39, i = 0; size > 12; size -= 9, i ++ )
	{
		Uint64	*ent = &pmlevels[i][Addr >> size];
//		INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &

		// Check for a free large page slot
		// TODO: Better support with selectable levels
		if( (Addr & ((1ULL << size)-1)) == 0 && bLargePage )
		{
			if(Pointer)	*Pointer = ent;
			return size;
		}

		// Allocate an entry if required
		if( !(*ent & PF_PRESENT) )
		{
			if( !bAllocate )	return -4;	// If allocation is not requested, error
			if( !(tmp = MM_AllocPhys()) )	return -2;
			*ent = tmp | 3;
			if( Addr < 0x800000000000 )
				*ent |= PF_USER;
			INVLPG( &pmlevels[i+1][ (Addr>>size)*512 ] );
			memset( &pmlevels[i+1][ (Addr>>size)*512 ], 0, 0x1000 );
			LOG("Init PML%i ent 0x%x %p with %P", 4 - i,
				Addr>>size, (Addr>>size) << size, tmp);
		}
		// Catch existing large pages
		else if( *ent & PF_LARGE )
		{
			// Alignment is required to address within a large page
			if( (Addr & ((1ULL << size)-1)) != 0 )	return -3;
			if(Pointer)	*Pointer = ent;
			return size;	// Large page warning
		}
	}

	// And, set the page table entry
	if(Pointer)	*Pointer = &pmlevels[i][Addr >> size];
	return 12;
}
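/*
 * Typical read-only use (this is what MM_GetPhysAddr below does):
 *   tPAddr *ent;
 *   if( MM_GetPageEntryPtr(addr, 0, 0, 0, &ent) >= 0 && (*ent & PF_PRESENT) )
 *       phys = (*ent & PADDR_MASK) | (addr & 0xFFF);
 */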
/**
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
 * \param bTemp Use temporary mappings
 * \param bLarge Treat as a large page
 */
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
{
	tPAddr	*ent;
	 int	rv;
	ENTER("xVAddr xPAddr", VAddr, PAddr);

	// Get page pointer (Allow allocating)
	rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
	if(rv < 0)	LEAVE_RET('i', 0);

	// Refuse to overwrite an existing mapping
	if( *ent & 1 )	LEAVE_RET('i', 0);

	*ent = PAddr | 3;
	if( VAddr < 0x800000000000 )
		*ent |= PF_USER;
	INVLPG( VAddr );
	LEAVE_RET('i', 1);
}
/**
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
 */
int MM_Map(tVAddr VAddr, tPAddr PAddr)
{
	return MM_MapEx(VAddr, PAddr, 0, 0);
}
/**
 * \brief Remove a mapped page
 */
void MM_Unmap(tVAddr VAddr)
{
	// Check that the tables along the path are present
	if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )	return ;
	if( !(PAGEDIRPTR(VAddr >> 30) & 1) )	return ;
	if( !(PAGEDIR(VAddr >> 21) & 1) )	return ;

	PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
	INVLPG( VAddr );
}
/**
 * \brief Allocate a block of memory at the specified virtual address
 */
tPAddr MM_Allocate(tVAddr VAddr)
{
	tPAddr	ret;
	ENTER("xVAddr", VAddr);

	// Ensure the tables are allocated before the page (keeps things neat)
	MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);

	ret = MM_AllocPhys();
	LOG("ret = %x", ret);
	if(!ret)	LEAVE_RET('i', 0);

	if( !MM_Map(VAddr, ret) )
	{
		Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
		MM_DerefPhys(ret);
		LEAVE_RET('i', 0);
	}
	LEAVE_RET('X', ret);
}
tPAddr MM_AllocateZero(tVAddr VAddr)
{
	tPAddr	ret = gMM_ZeroPage;
	MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
	if( !gMM_ZeroPage )
	{
		ret = gMM_ZeroPage = MM_AllocPhys();
		MM_RefPhys(ret);	// Don't free this please
		MM_Map(VAddr, ret);
		memset((void*)VAddr, 0, 0x1000);
	}
	else
		MM_Map(VAddr, ret);
	MM_RefPhys(ret);	// Reference for this map
	MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
	return ret;
}
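/*
 * The zero page is allocated once and shared from then on: every
 * MM_AllocateZero maps the same frame with PF_COW set, so the first write
 * to any such page faults into MM_int_ClonePageEnt and gets a private
 * copy, while untouched pages keep costing a single frame in total.
 */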
/**
 * \brief Deallocate a page at a virtual address
 */
void MM_Deallocate(tVAddr VAddr)
{
	tPAddr	phys = MM_GetPhysAddr(VAddr);
	if( !phys )	return ;
	MM_Unmap(VAddr);
	MM_DerefPhys(phys);
}
/**
 * \brief Get the page table entry of a virtual address
 * \param Addr Virtual Address
 * \param Phys Location to put the physical address
 * \param Flags Flags on the entry (set to zero if unmapped)
 * \return Size of the entry (in address bits) - 12 = 4KiB page
 */
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
{
	tPAddr	*ptr;
	 int	ret;

	if(!Phys || !Flags)	return 0;

	ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
	if( ret < 0 )	return 0;

	*Phys = *ptr & PADDR_MASK;
	*Flags = *ptr & 0xFFF;
	return ret;
}
/**
 * \brief Get the physical address of a virtual location
 */
tPAddr MM_GetPhysAddr(tVAddr Addr)
{
	tPAddr	*ptr;
	 int	ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
	if( ret < 0 )	return 0;
	if( !(*ptr & 1) )	return 0;
	return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
}
/**
 * \brief Sets the flags on a page
 */
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
{
	tPAddr	*ent;
	 int	rv;
	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
	if(rv < 0)	return ;

	// Ensure the entry is valid
	if( !(*ent & 1) )	return ;

	if( Mask & MM_PFLAG_RO ) {
		if( Flags & MM_PFLAG_RO )	*ent &= ~PF_WRITE;
		else	*ent |= PF_WRITE;
	}
	if( Mask & MM_PFLAG_KERNEL ) {
		if( Flags & MM_PFLAG_KERNEL )	*ent &= ~PF_USER;
		else	*ent |= PF_USER;
	}
	if( Mask & MM_PFLAG_COW ) {
		if( Flags & MM_PFLAG_COW ) {	*ent &= ~PF_WRITE;	*ent |= PF_COW;	}
		else {	*ent &= ~PF_COW;	*ent |= PF_WRITE;	}
	}
	// MM_PFLAG_EXEC is the inverse of the hardware NX bit
	if( Mask & MM_PFLAG_EXEC ) {
		if( Flags & MM_PFLAG_EXEC )	*ent &= ~PF_NX;
		else	*ent |= PF_NX;
	}
}
/**
 * \brief Get the flags applied to a page
 */
Uint MM_GetFlags(tVAddr VAddr)
{
	tPAddr	*ent;
	Uint	ret = 0;
	 int	rv;
	rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
	if(rv < 0)	return 0;
	if( !(*ent & 1) )	return 0;

	if( !(*ent & PF_WRITE) )	ret |= MM_PFLAG_RO;
	if( !(*ent & PF_USER) )	ret |= MM_PFLAG_KERNEL;
	if( *ent & PF_COW )	ret |= MM_PFLAG_COW;
	if( !(*ent & PF_NX) )	ret |= MM_PFLAG_EXEC;
	return ret;
}
// --- Hardware Mappings ---
/**
 * \brief Map a range of hardware pages
 */
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
{
	tVAddr	ret;
	 int	num;
	//TODO: Add speedups (memory of first possible free)
	for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
	{
		// Check if `Number` pages starting at `ret` are free
		for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
		{
			if( MM_GetPhysAddr(ret) != 0 )	break;
		}
		if( num >= 0 )	continue;

		// Found a free run; map it back-to-front so `ret` ends at its base
		// (sketch of the mapping step; the reference is dropped by MM_AllocDMA)
		PAddr += 0x1000 * Number;
		while( Number -- ) {
			ret -= 0x1000;	PAddr -= 0x1000;
			MM_RefPhys(PAddr);
			MM_Map(ret, PAddr);
		}
		return ret;
	}
	Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
	return 0;
}
/**
 * \brief Free a range of hardware pages
 */
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
{
//	Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
	for( ; Number --; VAddr += 0x1000 )
	{
		MM_DerefPhys( MM_GetPhysAddr(VAddr) );
		MM_Unmap(VAddr);
	}
}
/**
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages Number of pages required
 * \param MaxBits Maximum number of bits the physical address can have
 * \param PhysAddr Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
 */
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
{
	tPAddr	phys;
	tVAddr	ret;

	// Sanity Check
	if(MaxBits < 12 || !PhysAddr)	return 0;

	// Fast Allocate
	if(Pages == 1 && MaxBits >= PHYS_BITS)
	{
		phys = MM_AllocPhys();
		*PhysAddr = phys;
		ret = MM_MapHWPages(phys, 1);
		MM_DerefPhys(phys);	// MapHWPages took its own reference
		return ret;
	}

	// Slow Allocate
	phys = MM_AllocPhysRange(Pages, MaxBits);
	// - Was it allocated?
	if(phys == 0)	return 0;

	// Allocated successfully, now map
	ret = MM_MapHWPages(phys, Pages);
	*PhysAddr = phys;
	// MapHWPages references the pages, so deref them back down to 1
	for(;Pages--;phys+=0x1000)
		MM_DerefPhys(phys);
	// If it didn't map, free then return 0
	if( ret == 0 )	return 0;
	return ret;
}
// --- Temporary Mappings ---
tVAddr MM_MapTemp(tPAddr PAddr)
{
	const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
	tVAddr	ret = MM_TMPMAP_BASE;
	 int	i;
	for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
	{
		tPAddr	*ent;
		if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
			continue ;
		}
		if( *ent & 1 )	continue ;	// Slot in use
		*ent = PAddr | 3;
		MM_RefPhys(PAddr);
		return ret;
	}
	return 0;
}
void MM_FreeTemp(tVAddr VAddr)
{
	MM_Deallocate(VAddr);
}
// --- Address Space Clone ---
tPAddr MM_Clone(void)
{
	tPAddr	ret;
	 int	i;
	tVAddr	kstackbase;

	// #1 Create a copy of the PML4
	ret = MM_AllocPhys();
	if(!ret)	return 0;

	// #2 Alter the fractal pointer
	Mutex_Acquire(&glMM_TempFractalLock);
	TMPCR3() = ret | 3;
	INVLPG_ALL();

	// #3 Set Copy-On-Write to all user pages
	for( i = 0; i < 256; i ++)
	{
		if( PAGEMAPLVL4(i) & PF_WRITE ) {
			PAGEMAPLVL4(i) |= PF_COW;
			PAGEMAPLVL4(i) &= ~PF_WRITE;
		}

		TMPMAPLVL4(i) = PAGEMAPLVL4(i);
//		Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
		if( !(TMPMAPLVL4(i) & PF_PRESENT) )	continue ;

		MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
	}
	// #4 Map in kernel pages
	for( i = 256; i < 512; i ++ )
	{
		// Skip these slots:
		// 320 0xFFFFA...	- Kernel Stacks
		if( i == 320 )	continue;
		// 508 0xFFFFFE0..	- Fractal mapping
		if( i == 508 )	continue;
		// 509 0xFFFFFE8..	- Temp fractal mapping
		if( i == 509 )	continue;

		TMPMAPLVL4(i) = PAGEMAPLVL4(i);
		if( TMPMAPLVL4(i) & 1 )
			MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
	}
	// Mark Per-Process data as COW
	TMPMAPLVL4(MM_PPD_BASE>>39) |= PF_COW;
	TMPMAPLVL4(MM_PPD_BASE>>39) &= ~PF_WRITE;

	// #5 Set fractal mapping
	TMPMAPLVL4(MM_FRACTAL_BASE>>39) = ret | 3;	// Main
	TMPMAPLVL4(MM_TMPFRAC_BASE>>39) = 0;	// Temp
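	/*
	 * The new PML4's own fractal slot must point at `ret` (the new
	 * PML4's frame), not at the parent's, so the child sees its own
	 * tables through the PAGETABLE/... macros once it runs.  Its temp
	 * slot starts empty and is only populated, under
	 * glMM_TempFractalLock, when the child itself clones.
	 */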
	// #6 Create kernel stack
	//  tThread->KernelStack is the top of the stack
	//  There is 1 guard page below the stack
	kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;

	// Clone the current stack into freshly allocated frames
	TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
	for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
	{
		tPAddr	phys = MM_AllocPhys();
		tVAddr	tmpmapping;
		MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);

		tmpmapping = MM_MapTemp(phys);
		if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
			memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
		else
			memset((void*)tmpmapping, 0, 0x1000);
//		Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
		MM_FreeTemp(tmpmapping);
	}
	// #7 Return the new PML4's physical address
	Mutex_Release(&glMM_TempFractalLock);
//	Log("MM_Clone: RETURN %P", ret);
	return ret;
}
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts)
{
	Uint64	* const table_bases[] = {&PAGETABLE(0), &PAGEDIR(0), &PAGEDIRPTR(0), &PAGEMAPLVL4(0)};
	Uint64	*table = table_bases[(LevelBits-12)/9] + (VAddr >> LevelBits);
	 int	i;
//	Log("MM_int_ClearTableLevel: (VAddr=%p, LevelBits=%i, MaxEnts=%i)", VAddr, LevelBits, MaxEnts);
	for( i = 0; i < MaxEnts; i ++ )
	{
		// Skip non-present tables
		if( !(table[i] & PF_PRESENT) ) {
			table[i] = 0;
			continue ;
		}
		// Shared COW entries are only dereferenced, never walked
		if( (table[i] & PF_COW) && MM_GetRefCount(table[i] & PADDR_MASK) > 1 ) {
			MM_DerefPhys(table[i] & PADDR_MASK);
			table[i] = 0;
			continue ;
		}
		// Clear table contents (if it is a table)
		if( LevelBits > 12 )
			MM_int_ClearTableLevel(VAddr + ((tVAddr)i << LevelBits), LevelBits-9, 512);
		MM_DerefPhys(table[i] & PADDR_MASK);
		table[i] = 0;
	}
}
void MM_ClearUser(void)
{
	MM_int_ClearTableLevel(0, 39, 256);
}
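/*
 * LevelBits=39 starts the recursion at the PML4 and MaxEnts=256 limits it
 * to entries 0-255: exactly the lower (user) half of the canonical
 * address space, 0 through 0x00007FFFFFFFFFFF.
 */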
tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
{
	tVAddr	ret, tmp_addr;
	tPAddr	paddr, *ptr;
	 int	i;

	// #1 Set temp fractal to PID0's address space
	Mutex_Acquire(&glMM_TempFractalLock);
	TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;

	// #2 Scan for a free stack address < 2^47
	for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
	{
		if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) <= 0 )	break;
		if( !(*ptr & 1) )	break;
	}
	if( ret >= (1ULL << 47) ) {
		Mutex_Release(&glMM_TempFractalLock);
		return 0;
	}
	// #3 Map all save the last page in the range
	//    - This acts as a guard page, and doesn't cost us anything.
	for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
	{
		tPAddr	phys = MM_AllocPhys();
		if( !phys ) {
			// TODO: Clean up already-allocated pages
			Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
			Mutex_Release(&glMM_TempFractalLock);
			return 0;
		}
		MM_MapEx(ret + i*0x1000, phys, 1, 0);
	}

	// #4 Copy the initial stack data into the top page
	if( StackSize > 0x1000 ) {
		Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, cbf handling", StackSize);
	}
	else
	{
		MM_GetPageEntryPtr(ret + i*0x1000, 1, 0, 0, &ptr);
		paddr = *ptr & ~0xFFF;
		tmp_addr = MM_MapTemp(paddr);
		memcpy( (void*)(tmp_addr + (0x1000 - StackSize)), StackData, StackSize );
		MM_FreeTemp(tmp_addr);
	}

	Mutex_Release(&glMM_TempFractalLock);

	return ret + i*0x1000;
}
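/*
 * Resulting layout for one worker stack (with N = KERNEL_STACK_SIZE/0x1000):
 * pages 0 .. N-2 are mapped, page N-1 is left unmapped as the guard, and
 * the returned value (ret + i*0x1000, with i == N-1 after the loop) is
 * handed to the new worker as its initial stack top.
 */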
/**
 * \brief Allocate a new kernel stack
 */
tVAddr MM_NewKStack(void)
{
	tVAddr	base = MM_KSTACK_BASE;
	Uint	i;
	for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
	{
		// Probe the top page; the lowest page is the unmapped guard
		if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
			continue;

		//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
		for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
		{
			if( !MM_Allocate(base+i) )
			{
				Log_Warning("MM", "MM_NewKStack - Allocation failed");
				// Roll back the pages allocated so far
				for( i -= 0x1000; i; i -= 0x1000)
					MM_Deallocate(base+i);
				return 0;
			}
		}
		return base + KERNEL_STACK_SIZE;
	}
	Log_Warning("MM", "MM_NewKStack - No address space left");
	return 0;
}