4 * Virtual Memory Manager
9 #include <threads_int.h>
14 #define PHYS_BITS 52 // TODO: Move out
22 #define PADDR_MASK 0x7FFFFFFF##FFFFF000
23 #define PAGE_MASK ((1LL << 36)-1)
24 #define TABLE_MASK ((1LL << 27)-1)
25 #define PDP_MASK ((1LL << 18)-1)
26 #define PML4_MASK ((1LL << 9)-1)
28 #define PF_PRESENT 0x001
29 #define PF_WRITE 0x002
31 #define PF_LARGE 0x080
32 #define PF_GLOBAL 0x100
34 #define PF_PAGED 0x400
35 #define PF_NX 0x80000000##00000000
38 #define PAGETABLE(idx) (*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
39 #define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
40 #define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
41 #define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
43 #define TMPCR3() PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
44 #define TMPTABLE(idx) (*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
45 #define TMPDIR(idx) PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
46 #define TMPDIRPTR(idx) PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
47 #define TMPMAPLVL4(idx) PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
// TLB invalidation helpers.
// Fixes over the previous revision:
//  - All three now declare a "memory" clobber so the compiler cannot cache
//    page-table or mapped-page reads across a TLB flush.
//  - INVLPG_ALL / INVLPG_GLOBAL scratch %rax, so they are written as
//    extended asm with an explicit "rax" clobber instead of silently
//    trashing a register the compiler may be using.
//  - The reserved identifier `__addr` is renamed (macro args are
//    positional, so callers are unaffected).
// INVLPG: flush the TLB entry for one virtual address.
49 #define INVLPG(addr_) __asm__ __volatile__ ("invlpg (%0)" :: "r"(addr_) : "memory")
// INVLPG_ALL: reload CR3, flushing all non-global TLB entries.
50 #define INVLPG_ALL() __asm__ __volatile__ ("mov %%cr3, %%rax;\n\tmov %%rax, %%cr3;" ::: "rax", "memory")
// INVLPG_GLOBAL: toggle CR4.PGE (bit 7) off then on, flushing global
// entries as well.
51 #define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %%cr4, %%rax;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4;\n\txorl $0x80, %%eax;\n\tmov %%rax, %%cr4" ::: "rax", "memory")
54 //tPAddr * const gaPageTable = MM_FRACTAL_BASE;
// === IMPORTS ===
57 extern void Error_Backtrace(Uint IP, Uint BP);
58 extern tPAddr gInitialPML4[512];
59 extern void Threads_SegFault(tVAddr Addr);
60 extern char _UsertextBase[];
// === PROTOTYPES ===
63 void MM_InitVirt(void);
64 //void MM_FinishVirtualInit(void);
65 void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable );
66 int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
67 void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
68 //void MM_DumpTables(tVAddr Start, tVAddr End);
69 int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
70 int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
71 // int MM_Map(tVAddr VAddr, tPAddr PAddr);
72 void MM_Unmap(tVAddr VAddr);
73 void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
74 //void MM_ClearUser(void);
75 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// === GLOBALS ===
// Serialises all users of the temporary fractal mapping (acquired in
// MM_Clone and MM_NewWorkerStack below).
78 tMutex glMM_TempFractalLock;
// Initialise the virtual-memory subsystem. The visible body only contains
// disabled debug output; any real setup presumably happens in code not
// shown here — TODO confirm.
82 void MM_InitVirt(void)
84 // Log_Debug("MMVirt", "&PAGEMAPLVL4(0) = %p", &PAGEMAPLVL4(0));
85 // MM_DumpTables(0, -1L);
// Late-stage virtual-memory initialisation hook (body not visible here).
88 void MM_FinishVirtualInit(void)
94 * \brief Clone a page from an entry
95 * \param Ent Pointer to the entry in the PML4/PDP/PD/PT
96 * \param NextLevel Pointer to contents of the entry
97 * \param Addr Dest address
// Resolves a copy-on-write fault on one paging entry: either takes sole
// ownership of the page (refcount 1) or duplicates it into a fresh frame.
100 void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable )
102 tPAddr curpage = *Ent & PADDR_MASK;
// A COW page must hold at least one reference; anything else means the
// refcounting is corrupt.
105 if( MM_GetRefCount( curpage ) <= 0 ) {
106 Log_KernelPanic("MMVirt", "Page %P still marked COW, but unreferenced", curpage);
// Sole owner: no copy needed, just restore write access in place.
108 if( MM_GetRefCount( curpage ) == 1 )
111 *Ent |= PF_PRESENT|PF_WRITE;
112 Log_Debug("MMVirt", "COW ent at %p (%p) only %P", Ent, NextLevel, curpage);
// Shared: allocate a private frame; on allocation failure the faulting
// thread is killed with a segfault rather than panicking the kernel.
119 if( !(paddr = MM_AllocPhys()) ) {
120 Threads_SegFault(Addr);
124 ASSERT(paddr != curpage);
// Copy the 4 KiB contents through a temporary mapping of the new frame.
126 tmp = (void*)MM_MapTemp(paddr);
127 memcpy( tmp, NextLevel, 0x1000 );
128 MM_FreeTemp( (tVAddr)tmp );
130 Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
// Drop our reference on the shared frame and point the entry at the
// private, writable copy.
132 MM_DerefPhys( curpage );
134 *Ent |= paddr|PF_PRESENT|PF_WRITE;
// Flush the stale translation for the fractal view of this entry's contents.
138 INVLPG( (tVAddr)NextLevel );
140 // Mark COW on contents if it's a PDPT, Dir or Table
// For a table-level entry, every present child entry gains a reference;
// writable children are presumably demoted to COW here (the demotion lines
// are not visible in this fragment — TODO confirm).
143 Uint64 *dp = NextLevel;
145 for( i = 0; i < 512; i ++ )
147 if( !(dp[i] & PF_PRESENT) )
151 MM_RefPhys( dp[i] & PADDR_MASK );
152 if( dp[i] & PF_WRITE ) {
161 * \brief Called on a page fault
// Page-fault handler: diagnoses reserved-bit corruption, services
// copy-on-write faults, and reports user/kernel access violations.
// \param Addr      Faulting virtual address (from CR2)
// \param ErrorCode x86 page-fault error code (bit0=present, bit1=write,
//                  bit2=user, bit3=reserved-bit, bit4=instruction fetch)
163 int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
165 // Log_Debug("MMVirt", "Addr = %p, ErrorCode = %x", Addr, ErrorCode);
167 // Catch reserved bits first
// Reserved-bit faults mean a paging structure itself is corrupt: dump the
// entry at each level until a non-present one is hit.
168 if( ErrorCode & 0x8 )
170 Log_Warning("MMVirt", "Reserved bits trashed!");
171 Log_Warning("MMVirt", "PML4 Ent = %P", PAGEMAPLVL4(Addr>>39));
172 if( !(PAGEMAPLVL4(Addr>>39) & PF_PRESENT) ) goto print_done;
173 Log_Warning("MMVirt", "PDP Ent = %P", PAGEDIRPTR(Addr>>30));
174 if( !(PAGEDIRPTR(Addr>>30) & PF_PRESENT) ) goto print_done;
175 Log_Warning("MMVirt", "PDir Ent = %P", PAGEDIR(Addr>>21));
176 if( !(PAGEDIR(Addr>>21) & PF_PRESENT) ) goto print_done;
177 Log_Warning("MMVirt", "PTable Ent = %P", PAGETABLE(Addr>>12));
178 if( !(PAGETABLE(Addr>>12) & PF_PRESENT) ) goto print_done;
184 // TODO: Implement Copy-on-Write
// COW servicing: only attempted when the whole translation chain is
// present; each level marked COW is cloned top-down so lower levels are
// reachable before they are examined.
186 if( PAGEMAPLVL4(Addr>>39) & PF_PRESENT
187 && PAGEDIRPTR (Addr>>30) & PF_PRESENT
188 && PAGEDIR (Addr>>21) & PF_PRESENT
189 && PAGETABLE (Addr>>12) & PF_PRESENT )
192 if( PAGEMAPLVL4(Addr>>39) & PF_COW )
194 tPAddr *dp = &PAGEDIRPTR((Addr>>39)*512);
195 MM_int_ClonePageEnt( &PAGEMAPLVL4(Addr>>39), dp, Addr, 1 );
196 // MM_DumpTables(Addr>>39 << 39, (((Addr>>39) + 1) << 39) - 1);
199 if( PAGEDIRPTR(Addr>>30) & PF_COW )
201 tPAddr *dp = &PAGEDIR( (Addr>>30)*512 );
202 MM_int_ClonePageEnt( &PAGEDIRPTR(Addr>>30), dp, Addr, 1 );
203 // MM_DumpTables(Addr>>30 << 30, (((Addr>>30) + 1) << 30) - 1);
206 if( PAGEDIR(Addr>>21) & PF_COW )
208 tPAddr *dp = &PAGETABLE( (Addr>>21)*512 );
209 MM_int_ClonePageEnt( &PAGEDIR(Addr>>21), dp, Addr, 1 );
210 // MM_DumpTables(Addr>>21 << 21, (((Addr>>21) + 1) << 21) - 1);
// Leaf level: clone the data page itself and flush its translation.
213 if( PAGETABLE(Addr>>12) & PF_COW )
215 MM_int_ClonePageEnt( &PAGETABLE(Addr>>12), (void*)(Addr & ~0xFFF), Addr, 0 );
216 INVLPG( Addr & ~0xFFF );
222 // If it was a user, tell the thread handler
// Unrecoverable user-mode fault: log, backtrace, and segfault the thread.
224 Warning("User %s %s memory%s",
225 (ErrorCode&2?"write to":"read from"),
226 (ErrorCode&1?"bad/locked":"non-present"),
227 (ErrorCode&16?" (Instruction Fetch)":"")
229 Warning("User Pagefault: Instruction at %04x:%p accessed %p",
230 Regs->CS, Regs->RIP, Addr);
231 __asm__ __volatile__ ("sti"); // Restart IRQs
232 Error_Backtrace(Regs->RIP, Regs->RBP);
233 Threads_SegFault(Addr);
239 // -- Check Error Code --
241 Warning("Reserved Bits Trashed!");
// Kernel-mode fault: fatal — log the access, backtrace, and dump the
// full page-table layout for post-mortem.
244 Warning("Kernel %s %s memory%s",
245 (ErrorCode&2?"write to":"read from"),
246 (ErrorCode&1?"bad/locked":"non-present"),
247 (ErrorCode&16?" (Instruction Fetch)":"")
251 Log("Thread %i - Code at %p accessed %p", Threads_GetTID(), Regs->RIP, Addr);
252 // Print Stack Backtrace
253 Error_Backtrace(Regs->RIP, Regs->RBP);
255 MM_DumpTables(0, -1);
// Print one contiguous run of identically-flagged pages for MM_DumpTables:
// "<canonical start> => <phys or 'zero'> : <length> (<flag chars>)".
260 void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected)
// Sign-extend bit 47 to produce a canonical 64-bit address.
// NOTE(review): macro name misspells "CANONICAL"; kept as-is since it is
// referenced below.
262 #define CANOICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
263 LogF("%016llx => ", CANOICAL(RangeStart));
264 // LogF("%6llx %6llx %6llx %016llx => ",
265 // MM_GetPhysAddr( (tVAddr)&PAGEDIRPTR(RangeStart>>30) ),
266 // MM_GetPhysAddr( (tVAddr)&PAGEDIR(RangeStart>>21) ),
267 // MM_GetPhysAddr( (tVAddr)&PAGETABLE(RangeStart>>12) ),
268 // CANOICAL(RangeStart)
// The shared zero page is printed symbolically instead of by address.
270 if( gMM_ZeroPage && (PAGETABLE(RangeStart>>12) & PADDR_MASK) == gMM_ZeroPage )
271 LogF("%13s", "zero" );
273 LogF("%13llx", PAGETABLE(RangeStart>>12) & PADDR_MASK );
// Flag summary: p=paged-out, C=copy-on-write, U=user, W=writable.
274 LogF(" : 0x%6llx (%c%c%c%c)\r\n",
276 (Expected & PF_PAGED ? 'p' : '-'),
277 (Expected & PF_COW ? 'C' : '-'),
278 (Expected & PF_USER ? 'U' : '-'),
279 (Expected & PF_WRITE ? 'W' : '-')
285 * \brief Dumps the layout of the page tables
// Walks [Start, End) page by page, coalescing runs whose entries share the
// same physical progression and access bits, and prints each run via
// MM_int_DumpTablesEnt. Non-present upper levels are skipped wholesale.
287 void MM_DumpTables(tVAddr Start, tVAddr End)
// Bits that may differ within a run (accessed/dirty etc.); everything
// else (physical address + the listed access bits) must match.
289 const tPAddr CHANGEABLE_BITS = ~(PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED) & 0xFFF;
290 const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
291 tVAddr rangeStart = 0;
292 tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a vaild value
296 Log("Table Entries: (%p to %p)", Start, End);
// Work in the 48-bit canonical-low space; page = VA >> 12 throughout.
298 End &= (1L << 48) - 1;
300 Start >>= 12; End >>= 12;
302 for(page = Start, curPos = Start<<12;
304 curPos += 0x1000, page++)
306 //Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
307 //Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
308 //Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
309 //Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
// End the current run when any level is non-present or the entry no
// longer matches the expected (address+flags) progression.
312 if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
313 || !(PAGEDIRPTR(page>>18) & PF_PRESENT)
314 || !(PAGEDIR(page>>9) & PF_PRESENT)
315 || !(PAGETABLE(page) & PF_PRESENT)
316 || (PAGETABLE(page) & MASK) != expected)
318 if(expected != CHANGEABLE_BITS)
320 MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
321 expected = CHANGEABLE_BITS;
// Jump across the non-canonical hole in the middle of the VA space.
324 if( curPos == 0x800000000000L )
325 curPos = 0xFFFF800000000000L;
// Skip the remainder of any non-present upper-level region in one step
// (the loop increment adds the final page/0x1000 back).
327 if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
328 page += (1 << 27) - 1;
329 curPos += (1L << 39) - 0x1000;
332 if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
333 page += (1 << 18) - 1;
334 curPos += (1L << 30) - 0x1000;
337 if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
338 page += (1 << 9) - 1;
339 curPos += (1L << 21) - 0x1000;
342 if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
// Start a new run from this entry.
344 expected = (PAGETABLE(page) & MASK);
// Zero-page mappings all share one frame, so 'expected' is presumably
// not advanced for them — the adjustment lines are not visible here.
347 if(gMM_ZeroPage && (expected & PADDR_MASK) == gMM_ZeroPage )
349 else if(expected != CHANGEABLE_BITS)
// Flush any run still open when the loop ends.
353 if(expected != CHANGEABLE_BITS) {
354 MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
360 * \brief Get a pointer to a page entry
361 * \param Addr Virtual Address
362 * \param bTemp Use the Temporary fractal mapping
363 * \param bAllocate Allocate entries
364 * \param bLargePage Request a large page
365 * \param Pointer Location to place the calculated pointer
366 * \return Page size, or -ve on error
368 int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
374 #define BITMASK(bits) ( (1LL << (bits))-1 )
// pmlevels[0..3] = base of PML4 / PDPT / PD / PT views, chosen from either
// the temporary or the primary fractal window.
378 pmlevels[3] = &TMPTABLE(0); // Page Table
379 pmlevels[2] = &TMPDIR(0); // PDIR
380 pmlevels[1] = &TMPDIRPTR(0); // PDPT
381 pmlevels[0] = &TMPMAPLVL4(0); // PML4
385 pmlevels[3] = (void*)MM_FRACTAL_BASE; // Page Table
386 pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&BITMASK(VIRT_BITS-12)]; // PDIR
387 pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&BITMASK(VIRT_BITS-21)]; // PDPT
388 pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&BITMASK(VIRT_BITS-30)]; // PML4
// Mask to the 48-bit space before indexing the level arrays.
392 Addr &= (1ULL << 48)-1;
// Walk PML4 (shift 39) down towards the PT (shift 12), one level per
// iteration; i indexes pmlevels top-down.
394 for( size = 39, i = 0; size > 12; size -= 9, i ++ )
396 Uint64 *ent = &pmlevels[i][Addr >> size];
397 // INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &
399 // Check for a free large page slot
400 // TODO: Better support with selectable levels
// If the caller wants a large page and Addr is aligned to this level,
// hand back the entry at this level (return value is the level's size).
401 if( (Addr & ((1ULL << size)-1)) == 0 && bLargePage )
403 if(Pointer) *Pointer = ent;
406 // Allocate an entry if required
407 if( !(*ent & PF_PRESENT) )
409 if( !bAllocate ) return -4; // If allocation is not requested, error
410 if( !(tmp = MM_AllocPhys()) ) return -2;
// User-half tables presumably also get PF_USER set on the new entry;
// the entry-write line itself is not visible in this fragment.
412 if( Addr < 0x800000000000 )
// Zero the freshly-mapped child table through the fractal (flush the
// stale translation first).
414 INVLPG( &pmlevels[i+1][ (Addr>>size)*512 ] );
415 memset( &pmlevels[i+1][ (Addr>>size)*512 ], 0, 0x1000 );
416 LOG("Init PML%i ent 0x%x %p with %P", 4 - i,
417 Addr>>size, (Addr>>size) << size, tmp);
// An existing large-page mapping blocks descent: only usable if Addr is
// aligned to it, otherwise error -3.
420 else if( *ent & PF_LARGE )
423 if( (Addr & ((1ULL << size)-1)) != 0 ) return -3;
424 if(Pointer) *Pointer = ent;
425 return size; // Large page warning
429 // And, set the page table entry
430 if(Pointer) *Pointer = &pmlevels[i][Addr >> size];
435 * \brief Map a physical page to a virtual one
436 * \param VAddr Target virtual address
437 * \param PAddr Physical address of page
438 * \param bTemp Use tempoary mappings
439 * \param bLarge Treat as a large page
// \return Boolean success (0 on failure).
441 int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
446 ENTER("pVAddr PPAddr", VAddr, PAddr);
448 // Get page pointer (Allow allocating)
449 rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
450 if(rv < 0) LEAVE_RET('i', 0);
// Refuse to overwrite an existing present mapping.
452 if( *ent & 1 ) LEAVE_RET('i', 0);
// User-half mappings presumably get PF_USER added; the entry-write lines
// are not visible in this fragment.
456 if( VAddr < 0x800000000000 )
466 * \brief Map a physical page to a virtual one
467 * \param VAddr Target virtual address
468 * \param PAddr Physical address of page
// Convenience wrapper: normal (non-temp) 4 KiB mapping via MM_MapEx.
470 int MM_Map(tVAddr VAddr, tPAddr PAddr)
472 return MM_MapEx(VAddr, PAddr, 0, 0);
476 * \brief Removed a mapped page
// Clears the PTE for VAddr. Safe on unmapped addresses: bails out early if
// any upper-level entry is non-present, so the fractal access to the PTE
// cannot itself fault. Does not flush the TLB or free the frame here.
478 void MM_Unmap(tVAddr VAddr)
481 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
483 if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
485 if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
487 PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
492 * \brief Allocate a block of memory at the specified virtual address
// Allocates one physical frame and maps it at VAddr.
// \return Physical address mapped, or 0 on failure.
494 tPAddr MM_Allocate(tVAddr VAddr)
498 ENTER("xVAddr", VAddr);
500 // Ensure the tables are allocated before the page (keeps things neat)
501 MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
// Grab a frame; 0 means out of physical memory.
504 ret = MM_AllocPhys();
505 LOG("ret = %x", ret);
506 if(!ret) LEAVE_RET('i', 0);
508 if( !MM_Map(VAddr, ret) )
// Tables were pre-allocated above, so the map should not fail here.
510 Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
// Map the shared zero page at VAddr copy-on-write, creating (and pinning)
// the global zero page on first use.
// \return Physical address of the zero page, or presumably 0 on failure
//         (failure path not visible in this fragment).
520 tPAddr MM_AllocateZero(tVAddr VAddr)
522 tPAddr ret = gMM_ZeroPage;
524 MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
// First use: allocate the zero page and take a permanent reference so it
// is never returned to the allocator.
527 ret = gMM_ZeroPage = MM_AllocPhys();
528 MM_RefPhys(ret); // Don't free this please
530 memset((void*)VAddr, 0, 0x1000);
535 MM_RefPhys(ret); // Refernce for this map
536 MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
541 * \brief Deallocate a page at a virtual address
// Looks up the backing frame (then presumably unmaps and dereferences it —
// those lines are not visible in this fragment).
543 void MM_Deallocate(tVAddr VAddr)
547 phys = MM_GetPhysAddr(VAddr);
556 * \brief Get the page table entry of a virtual address
557 * \param Addr Virtual Address
558 * \param Phys Location to put the physical address
559 * \param Flags Flags on the entry (set to zero if unmapped)
560 * \return Size of the entry (in address bits) - 12 = 4KiB page
562 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
// Both out-parameters are required.
567 if(!Phys || !Flags) return 0;
// Look up without allocating; a negative result means unmapped.
569 ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
570 if( ret < 0 ) return 0;
// Split the raw entry into frame address and low flag bits.
572 *Phys = *ptr & PADDR_MASK;
573 *Flags = *ptr & 0xFFF;
578 * \brief Get the physical address of a virtual location
// \return Physical address (frame | page offset), or 0 if unmapped.
580 tPAddr MM_GetPhysAddr(tVAddr Addr)
585 ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
586 if( ret < 0 ) return 0;
// Entry exists but is not present: treat as unmapped.
588 if( !(*ptr & 1) ) return 0;
590 return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
594 * \brief Sets the flags on a page
// For each MM_PFLAG_* bit set in Mask, applies the corresponding value from
// Flags to the page entry (RO<->PF_WRITE, KERNEL<->PF_USER, COW, EXEC<->PF_NX).
// Silently no-ops on unmapped/non-present pages. The individual set/clear
// lines inside each branch are not visible in this fragment.
596 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
602 rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
605 // Ensure the entry is valid
606 if( !(*ent & 1) ) return ;
// Read-only <-> writable.
609 if( Mask & MM_PFLAG_RO )
611 if( Flags & MM_PFLAG_RO ) {
// Kernel-only <-> user-accessible.
620 if( Mask & MM_PFLAG_KERNEL )
622 if( Flags & MM_PFLAG_KERNEL ) {
// Copy-on-write marking.
631 if( Mask & MM_PFLAG_COW )
633 if( Flags & MM_PFLAG_COW ) {
// Executable <-> no-execute.
644 if( Mask & MM_PFLAG_EXEC )
646 if( Flags & MM_PFLAG_EXEC ) {
656 * \brief Get the flags applied to a page
// Translates raw page-entry bits back into MM_PFLAG_* values; returns 0 for
// unmapped or non-present pages.
658 Uint MM_GetFlags(tVAddr VAddr)
663 rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
666 if( !(*ent & 1) ) return 0;
// Note the inversions: absence of WRITE => RO, absence of USER => KERNEL,
// absence of NX => EXEC.
669 if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
671 if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
673 if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
675 if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
681 * \brief Check if the provided buffer is valid
682 * \return Boolean valid
// Verifies every page of [Addr, Addr+Size) is mapped, and that a buffer
// starting in user memory stays in user memory throughout.
684 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
687 Uint64 pml4, pdp, dir, tab;
// Round the range out to whole pages.
689 Size += Addr & (PAGE_SIZE-1);
690 Addr &= ~(PAGE_SIZE-1);
691 Addr &= ((1UL << 48)-1); // Clamp to address space
// First page: all four levels must be present (per-level index
// initialisation lines are not visible in this fragment).
698 if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
699 if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
700 if( !(PAGEDIR(dir) & 1) ) return 0;
701 if( !(PAGETABLE(tab) & 1) ) return 0;
// Remember whether the buffer starts as user-accessible.
703 bIsUser = !!(PAGETABLE(tab) & PF_USER);
705 while( Size >= PAGE_SIZE )
// Re-check upper levels only when the walk crosses their boundaries.
707 if( (tab & 511) == 0 )
710 if( ((dir >> 9) & 511) == 0 )
713 if( ((pdp >> 18) & 511) == 0 )
716 if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
718 if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
720 if( !(PAGEDIR(dir) & 1) ) return 0;
723 if( !(PAGETABLE(tab) & 1) ) return 0;
// A user buffer must not stray into kernel-only pages.
724 if( bIsUser && !(PAGETABLE(tab) & PF_USER) ) return 0;
732 // --- Hardware Mappings ---
734 * \brief Map a range of hardware pages
// Linear first-fit scan of the HW-mapping window for Number consecutive
// unmapped pages; maps PAddr..PAddr+Number*4KiB there.
// \return Base virtual address of the mapping, or presumably 0 on failure
//         (the success-path mapping lines are not visible in this fragment).
736 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
741 //TODO: Add speedups (memory of first possible free)
742 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
// Probe Number pages; stop early on the first one already in use.
744 for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
746 if( MM_GetPhysAddr(ret) != 0 ) break;
// num >= 0 means the inner loop broke on a used page — keep scanning.
748 if( num >= 0 ) continue;
750 // Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
// ret has advanced past the free run; the pages are presumably mapped
// walking backwards from here (those lines are not visible).
752 PAddr += 0x1000 * Number;
765 Log_Error("MM", "MM_MapHWPages - No space for %i pages", Number);
770 * \brief Free a range of hardware pages
// Dereferences the physical frame(s) behind the mapping (the loop/unmap
// lines are not visible in this fragment).
772 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
774 // Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
777 MM_DerefPhys( MM_GetPhysAddr(VAddr) );
785 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
786 * \brief Allocates DMA physical memory
787 * \param Pages Number of pages required
788 * \param MaxBits Maximum number of bits the physical address can have
789 * \param PhysAddr Pointer to the location to place the physical address allocated
790 * \return Virtual address allocate
792 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
// Sanity: need at least a page-sized address constraint and somewhere to
// report the physical address.
798 if(MaxBits < 12 || !PhysAddr) return 0;
// Fast path: a single unconstrained page needs no contiguous range search.
801 if(Pages == 1 && MaxBits >= PHYS_BITS)
803 phys = MM_AllocPhys();
805 ret = MM_MapHWPages(phys, 1);
// General case: contiguous range below 2^MaxBits.
811 phys = MM_AllocPhysRange(Pages, MaxBits);
812 // - Was it allocated?
813 if(phys == 0) return 0;
815 // Allocated successfully, now map
816 ret = MM_MapHWPages(phys, Pages);
817 // MapHWPages references the pages, so deref them back down to 1
818 for(;Pages--;phys+=0x1000)
821 // If it didn't map, free then return 0
829 // --- Tempory Mappings ---
// Map PAddr into the first free slot of the temp-mapping window.
// \return Virtual address of the slot, or presumably 0 if all slots are
//         taken (the claim/return lines are not visible in this fragment).
830 tVAddr MM_MapTemp(tPAddr PAddr)
832 const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
833 tVAddr ret = MM_TMPMAP_BASE;
836 for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
// Allocate tables for the slot as needed; skip the slot on failure.
839 if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
// Release a temporary mapping created by MM_MapTemp.
854 void MM_FreeTemp(tVAddr VAddr)
856 MM_Deallocate(VAddr);
861 // --- Address Space Clone --
// Create a copy-on-write clone of the current address space (fork).
// \return Physical address of the new PML4, suitable for loading into CR3.
862 tPAddr MM_Clone(void)
868 // #1 Create a copy of the PML4
869 ret = MM_AllocPhys();
872 // #2 Alter the fractal pointer
// The temp fractal window is pointed at the new PML4 so it can be filled
// in through TMPMAPLVL4(); the lock serialises all temp-fractal users.
873 Mutex_Acquire(&glMM_TempFractalLock);
877 // #3 Set Copy-On-Write to all user pages
// User half (entries 0..255): demote writable entries to COW in the
// CURRENT space too, then share them (with a reference) into the clone.
878 for( i = 0; i < 256; i ++)
880 if( PAGEMAPLVL4(i) & PF_WRITE ) {
881 PAGEMAPLVL4(i) |= PF_COW;
882 PAGEMAPLVL4(i) &= ~PF_WRITE;
885 TMPMAPLVL4(i) = PAGEMAPLVL4(i);
886 // Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
887 if( !(TMPMAPLVL4(i) & PF_PRESENT) ) continue ;
889 MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
892 // #4 Map in kernel pages
// Kernel half (256..511) is shared by reference, except the per-space
// slots (kernel stacks, both fractal windows) which are handled below.
893 for( i = 256; i < 512; i ++ )
896 // 320 0xFFFFA.... - Kernel Stacks
897 if( i == MM_KSTACK_BASE>>39 ) continue;
898 // 509 0xFFFFFE0.. - Fractal mapping
899 if( i == MM_FRACTAL_BASE>>39 ) continue;
900 // 510 0xFFFFFE8.. - Temp fractal mapping
901 if( i == MM_TMPFRAC_BASE>>39 ) continue;
903 TMPMAPLVL4(i) = PAGEMAPLVL4(i);
904 if( TMPMAPLVL4(i) & 1 )
905 MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
908 // Mark Per-Process data as COW
909 TMPMAPLVL4(MM_PPD_BASE>>39) |= PF_COW;
910 TMPMAPLVL4(MM_PPD_BASE>>39) &= ~PF_WRITE;
912 // #5 Set fractal mapping
// The clone's own fractal slot points back at its PML4 (present|write);
// its temp-fractal slot starts empty.
913 TMPMAPLVL4(MM_FRACTAL_BASE>>39) = ret | 3; // Main
914 TMPMAPLVL4(MM_TMPFRAC_BASE>>39) = 0; // Temp
916 // #6 Create kernel stack
917 // tThread->KernelStack is the top
918 // There is 1 guard page below the stack
919 kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
// Fresh stack tables in the clone; page 0 stays unmapped as the guard.
922 TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
923 for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
925 tPAddr phys = MM_AllocPhys();
927 MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
// Copy the live stack contents page-by-page through a temp mapping;
// pages not currently mapped in this space are zero-filled instead.
929 tmpmapping = MM_MapTemp(phys);
930 if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
931 memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
933 memset((void*)tmpmapping, 0, 0x1000);
935 // Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
936 MM_FreeTemp(tmpmapping);
944 Mutex_Release(&glMM_TempFractalLock);
945 // Log("MM_Clone: RETURN %P", ret);
// Recursively tear down one paging level under VAddr via the fractal
// window, dereferencing frames as entries are cleared.
// \param LevelBits 12=PT, 21=PD, 30=PDPT, 39=PML4 (index into table_bases)
// \param MaxEnts   Number of entries to clear (e.g. 256 for the user half)
949 void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts)
951 Uint64 * const table_bases[] = {&PAGETABLE(0), &PAGEDIR(0), &PAGEDIRPTR(0), &PAGEMAPLVL4(0)};
952 Uint64 *table = table_bases[(LevelBits-12)/9] + (VAddr >> LevelBits);
954 // Log("MM_int_ClearTableLevel: (VAddr=%p, LevelBits=%i, MaxEnts=%i)", VAddr, LevelBits, MaxEnts);
955 for( i = 0; i < MaxEnts; i ++ )
957 // Skip non-present tables
958 if( !(table[i] & PF_PRESENT) ) {
// Shared COW entries just drop a reference — no recursion needed, the
// contents still belong to the other owner(s).
963 if( (table[i] & PF_COW) && MM_GetRefCount(table[i] & PADDR_MASK) > 1 ) {
964 MM_DerefPhys(table[i] & PADDR_MASK);
968 // Clear table contents (if it is a table)
970 MM_int_ClearTableLevel(VAddr + ((tVAddr)i << LevelBits), LevelBits-9, 512);
971 MM_DerefPhys(table[i] & PADDR_MASK);
// Unmap the entire user half of the address space (PML4 entries 0..255).
976 void MM_ClearUser(void)
978 MM_int_ClearTableLevel(0, 39, 256);
// Allocate a kernel stack for a worker thread inside PID0's address space
// (edited through the temp fractal window), seeding it with StackData.
// \return Virtual address of the stack TOP (one page below it is a guard).
981 tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
986 // #1 Set temp fractal to PID0
987 Mutex_Acquire(&glMM_TempFractalLock);
988 TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
990 // #2 Scan for a free stack addresss < 2^47
// A slot is free if its tables are absent or its first page is not present.
991 for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
994 if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) <= 0 ) break;
995 if( !(*ptr & 1) ) break;
997 if( ret >= (1ULL << 47) ) {
998 Mutex_Release(&glMM_TempFractalLock);
1002 // #3 Map all save the last page in the range
1003 // - This acts as as guard page, and doesn't cost us anything.
1004 for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
1006 tPAddr phys = MM_AllocPhys();
1009 Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
1012 MM_MapEx(ret + i*0x1000, phys, 1, 0);
// Seed data is limited to one page.
1015 if( StackSize > 0x1000 ) {
1016 Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, cbf handling", StackSize);
// Copy StackData into the end of the topmost mapped page (the stack grows
// down from the returned address) via a temp mapping of its frame.
1021 MM_GetPageEntryPtr(ret + i*0x1000, 1, 0, 0, &ptr);
1022 paddr = *ptr & ~0xFFF;
1023 tmp_addr = MM_MapTemp(paddr);
1024 memcpy( (void*)(tmp_addr + (0x1000 - StackSize)), StackData, StackSize );
1025 MM_FreeTemp(tmp_addr);
1028 Mutex_Release(&glMM_TempFractalLock);
1030 return ret + i*0x1000;
1034 * \brief Allocate a new kernel stack
// Scans the kernel-stack region in KERNEL_STACK_SIZE strides; the first
// page of each slot is left unmapped as a guard, the rest are allocated.
// \return Address of the stack TOP, or presumably 0 when the region is
//         exhausted (the final return is not visible in this fragment).
1036 tVAddr MM_NewKStack(void)
1038 tVAddr base = MM_KSTACK_BASE;
1040 for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
// Slot in use if its topmost page is already mapped.
1042 if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
1045 //Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
// Allocate every page except the guard page at offset 0.
1046 for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
1048 if( !MM_Allocate(base+i) )
// Roll back the pages already allocated for this slot.
1050 Log_Warning("MM", "MM_NewKStack - Allocation failed");
1051 for( i -= 0x1000; i; i -= 0x1000)
1052 MM_Deallocate(base+i);
1057 return base + KERNEL_STACK_SIZE;
1059 Log_Warning("MM", "MM_NewKStack - No address space left\n");