/*
 * Virtual Memory Manager
 */
#include <threads_int.h>
// === DEBUG OPTIONS ===
#define PHYS_BITS 52 // TODO: Move out
#define VIRT_BITS 48 // 48-bit canonical virtual addresses (referenced below)
#define PADDR_MASK 0x7FFFFFFF##FFFFF000
#define PAGE_MASK ((1LL << 36)-1)
#define TABLE_MASK ((1LL << 27)-1)
#define PDP_MASK ((1LL << 18)-1)
#define PML4_MASK ((1LL << 9)-1)
#define PF_PRESENT 0x001
#define PF_WRITE 0x002
#define PF_USER 0x004
#define PF_LARGE 0x080
#define PF_GLOBAL 0x100
#define PF_COW 0x200 // Available bit 9: copy-on-write (assumed value, consistent with its neighbours and used below)
#define PF_PAGED 0x400
#define PF_NX 0x80000000##00000000
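// The fractal (recursive) mapping reserves one PML4 slot that points back at
// the PML4 itself, so every page table of the current address space appears
// as ordinary data. PAGETABLE(i) is the PTE of the i-th 4KiB page; each
// deeper macro indexes the same window again to reach the PD, PDPT and PML4
// entries. E.g. the PTE for address A is PAGETABLE(A >> 12) and its
// directory entry is PAGEDIR(A >> 21).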
#define PAGETABLE(idx) (*((Uint64*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
#define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
#define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
#define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
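// A second fractal window (MM_TMPFRAC_BASE) exposes the tables of *another*
// address space once its PML4 entry is pointed at that space's top table
// (see TMPCR3() below, used by MM_Clone and MM_NewWorkerStack).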
#define TMPCR3() PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
#define TMPTABLE(idx) (*((Uint64*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
#define TMPDIR(idx) PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
#define TMPDIRPTR(idx) PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
#define TMPMAPLVL4(idx) PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
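// TLB maintenance: INVLPG flushes a single entry, reloading CR3 flushes all
// non-global entries, and toggling CR4.PGE (bit 7) also flushes global entries.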
#define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr))
#define INVLPG_ALL() __asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;")
#define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4")
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
extern void Error_Backtrace(Uint IP, Uint BP);
extern tPAddr gInitialPML4[512];
extern void Threads_SegFault(tVAddr Addr);
extern char _UsertextBase[];
void MM_InitVirt(void);
//void MM_FinishVirtualInit(void);
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable );
int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
//void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
//void MM_ClearUser(void);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
tMutex glMM_TempFractalLock;
void MM_InitVirt(void)
// Log_Debug("MMVirt", "&PAGEMAPLVL4(0) = %p", &PAGEMAPLVL4(0));
// MM_DumpTables(0, -1L);
void MM_FinishVirtualInit(void)
/**
 * \brief Clone a page from an entry
 * \param Ent Pointer to the entry in the PML4/PDP/PD/PT
 * \param NextLevel Pointer to contents of the entry
 * \param Addr Destination address
 * \param bTable Set if NextLevel is a page table whose entries must also be marked COW
 */
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable )
tPAddr curpage = *Ent & PADDR_MASK;
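// A page still marked COW must have at least one reference; zero references
// here means the refcounting is corrupt.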
if( MM_GetRefCount( curpage ) <= 0 ) {
Log_KernelPanic("MMVirt", "Page %P still marked COW, but unreferenced", curpage);
if( MM_GetRefCount( curpage ) == 1 )
*Ent |= PF_PRESENT|PF_WRITE;
Log_Debug("MMVirt", "COW ent at %p (%p) only %P", Ent, NextLevel, curpage);
if( !(paddr = MM_AllocPhys()) ) {
Threads_SegFault(Addr);
ASSERT(paddr != curpage);
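// Copy the old contents into the new frame through a temporary mapping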
tmp = MM_MapTemp(paddr);
memcpy( tmp, NextLevel, 0x1000 );
Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
MM_DerefPhys( curpage );
*Ent |= paddr|PF_PRESENT|PF_WRITE;
INVLPG( (tVAddr)NextLevel );
// Mark COW on contents if it's a PDPT, Dir or Table
Uint64 *dp = NextLevel;
for( i = 0; i < 512; i ++ )
if( !(dp[i] & PF_PRESENT) )
MM_RefPhys( dp[i] & PADDR_MASK );
if( dp[i] & PF_WRITE ) {
/**
 * \brief Called on a page fault
 */
int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
// Log_Debug("MMVirt", "Addr = %p, ErrorCode = %x", Addr, ErrorCode);
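// #PF error code bits: 0x1 = protection violation (not non-present),
// 0x2 = write, 0x4 = user mode, 0x8 = reserved bit set, 0x10 = instruction fetch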
// Catch reserved bits first
if( ErrorCode & 0x8 )
Log_Warning("MMVirt", "Reserved bits trashed!");
Log_Warning("MMVirt", "PML4 Ent = %P", PAGEMAPLVL4(Addr>>39));
if( !(PAGEMAPLVL4(Addr>>39) & PF_PRESENT) ) goto print_done;
Log_Warning("MMVirt", "PDP Ent = %P", PAGEDIRPTR(Addr>>30));
if( !(PAGEDIRPTR(Addr>>30) & PF_PRESENT) ) goto print_done;
Log_Warning("MMVirt", "PDir Ent = %P", PAGEDIR(Addr>>21));
if( !(PAGEDIR(Addr>>21) & PF_PRESENT) ) goto print_done;
Log_Warning("MMVirt", "PTable Ent = %P", PAGETABLE(Addr>>12));
if( !(PAGETABLE(Addr>>12) & PF_PRESENT) ) goto print_done;
// Copy-on-Write handling
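// Resolve COW top-down: a COW mark on a PML4/PDPT/PD entry means the whole
// subtree is shared, so each level is cloned before the page itself.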
if( PAGEMAPLVL4(Addr>>39) & PF_PRESENT
&& PAGEDIRPTR (Addr>>30) & PF_PRESENT
&& PAGEDIR (Addr>>21) & PF_PRESENT
&& PAGETABLE (Addr>>12) & PF_PRESENT )
if( PAGEMAPLVL4(Addr>>39) & PF_COW )
tPAddr *dp = &PAGEDIRPTR((Addr>>39)*512);
MM_int_ClonePageEnt( &PAGEMAPLVL4(Addr>>39), dp, Addr, 1 );
// MM_DumpTables(Addr>>39 << 39, (((Addr>>39) + 1) << 39) - 1);
if( PAGEDIRPTR(Addr>>30) & PF_COW )
tPAddr *dp = &PAGEDIR( (Addr>>30)*512 );
MM_int_ClonePageEnt( &PAGEDIRPTR(Addr>>30), dp, Addr, 1 );
// MM_DumpTables(Addr>>30 << 30, (((Addr>>30) + 1) << 30) - 1);
if( PAGEDIR(Addr>>21) & PF_COW )
tPAddr *dp = &PAGETABLE( (Addr>>21)*512 );
MM_int_ClonePageEnt( &PAGEDIR(Addr>>21), dp, Addr, 1 );
// MM_DumpTables(Addr>>21 << 21, (((Addr>>21) + 1) << 21) - 1);
if( PAGETABLE(Addr>>12) & PF_COW )
MM_int_ClonePageEnt( &PAGETABLE(Addr>>12), (void*)(Addr & ~0xFFF), Addr, 0 );
INVLPG( Addr & ~0xFFF );
// If it was a user, tell the thread handler
Warning("User %s %s memory%s",
(ErrorCode&2?"write to":"read from"),
(ErrorCode&1?"bad/locked":"non-present"),
(ErrorCode&16?" (Instruction Fetch)":"")
Warning("User Pagefault: Instruction at %04x:%p accessed %p",
Regs->CS, Regs->RIP, Addr);
__asm__ __volatile__ ("sti"); // Restart IRQs
Error_Backtrace(Regs->RIP, Regs->RBP);
Threads_SegFault(Addr);
// -- Check Error Code --
Warning("Reserved Bits Trashed!");
Warning("Kernel %s %s memory%s",
(ErrorCode&2?"write to":"read from"),
(ErrorCode&1?"bad/locked":"non-present"),
(ErrorCode&16?" (Instruction Fetch)":"")
Log("Thread %i - Code at %p accessed %p", Threads_GetTID(), Regs->RIP, Addr);
// Print Stack Backtrace
Error_Backtrace(Regs->RIP, Regs->RBP);
MM_DumpTables(0, -1);
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected)
#define CANONICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr))
LogF("%016llx => ", CANONICAL(RangeStart));
// LogF("%6llx %6llx %6llx %016llx => ",
// MM_GetPhysAddr( (tVAddr)&PAGEDIRPTR(RangeStart>>30) ),
// MM_GetPhysAddr( (tVAddr)&PAGEDIR(RangeStart>>21) ),
// MM_GetPhysAddr( (tVAddr)&PAGETABLE(RangeStart>>12) ),
// CANONICAL(RangeStart)
if( gMM_ZeroPage && (PAGETABLE(RangeStart>>12) & PADDR_MASK) == gMM_ZeroPage )
LogF("%13s", "zero" );
LogF("%13llx", PAGETABLE(RangeStart>>12) & PADDR_MASK );
LogF(" : 0x%6llx (%c%c%c%c%c%c)\r\n",
(Expected & PF_GLOBAL ? 'G' : '-'),
(Expected & PF_NX ? '-' : 'x'),
(Expected & PF_PAGED ? 'p' : '-'),
(Expected & PF_COW ? 'C' : '-'),
(Expected & PF_USER ? 'U' : '-'),
(Expected & PF_WRITE ? 'W' : '-')
/**
 * \brief Dumps the layout of the page tables
 */
void MM_DumpTables(tVAddr Start, tVAddr End)
const tPAddr FIXED_BITS = PF_PRESENT|PF_WRITE|PF_USER|PF_COW|PF_PAGED|PF_NX|PF_GLOBAL;
const tPAddr CHANGEABLE_BITS = ~FIXED_BITS & 0xFFF;
const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
tVAddr rangeStart = 0;
tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a valid value
tPAddr expected_pml4 = PF_WRITE|PF_USER;
tPAddr expected_pdp = PF_WRITE|PF_USER;
tPAddr expected_pd = PF_WRITE|PF_USER;
Log("Table Entries: (%p to %p)", Start, End);
End &= (1L << 48) - 1;
Start >>= 12; End >>= 12;
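// Walk each page; contiguous pages whose flags match the running expectation
// (which advances one physical page per step) are merged into one output line.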
for(page = Start, curPos = Start<<12;
curPos += 0x1000, page++)
//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
//Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
//Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
if(!(PAGEMAPLVL4(page>>27) & PF_PRESENT)
|| (PAGEMAPLVL4(page>>27) & FIXED_BITS) != expected_pml4
|| !(PAGEDIRPTR(page>>18) & PF_PRESENT)
|| (PAGEDIRPTR(page>>18) & FIXED_BITS) != expected_pdp
|| !(PAGEDIR(page>>9) & PF_PRESENT)
|| (PAGEDIR(page>>9) & FIXED_BITS) != expected_pd
|| !(PAGETABLE(page) & PF_PRESENT)
|| (PAGETABLE(page) & MASK) != expected)
if(expected != CHANGEABLE_BITS)
expected &= expected_pml4 | ~(PF_WRITE|PF_USER);
expected &= expected_pdp | ~(PF_WRITE|PF_USER);
expected &= expected_pd | ~(PF_WRITE|PF_USER);
expected |= expected_pml4 & PF_NX;
expected |= expected_pdp & PF_NX;
expected |= expected_pd & PF_NX;
Log("expected (pml4 = %x, pdp = %x, pd = %x)",
expected_pml4, expected_pdp, expected_pd);
MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
expected = CHANGEABLE_BITS;
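// Skip the non-canonical hole between the user and kernel halves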
if( curPos == 0x800000000000L )
curPos = 0xFFFF800000000000L;
if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
page += (1 << 27) - 1;
curPos += (1L << 39) - 0x1000;
if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
page += (1 << 18) - 1;
curPos += (1L << 30) - 0x1000;
if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
page += (1 << 9) - 1;
curPos += (1L << 21) - 0x1000;
if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
expected = (PAGETABLE(page) & MASK);
expected_pml4 = (PAGEMAPLVL4(page>>27) & FIXED_BITS);
expected_pdp = (PAGEDIRPTR (page>>18) & FIXED_BITS);
expected_pd = (PAGEDIR (page>> 9) & FIXED_BITS);
if(gMM_ZeroPage && (expected & PADDR_MASK) == gMM_ZeroPage )
else if(expected != CHANGEABLE_BITS)
if(expected != CHANGEABLE_BITS) {
MM_int_DumpTablesEnt( rangeStart, curPos - rangeStart, expected );
/**
 * \brief Get a pointer to a page entry
 * \param Addr Virtual Address
 * \param bTemp Use the Temporary fractal mapping
 * \param bAllocate Allocate entries
 * \param bLargePage Request a large page
 * \param Pointer Location to place the calculated pointer
 * \return Page size, or negative on error
 */
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer)
#define BITMASK(bits) ( (1LL << (bits))-1 )
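// pmlevels[0..3] give the PML4, PDPT, PD and PT entries for an address,
// through either the temporary or the primary fractal window.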
pmlevels[3] = &TMPTABLE(0); // Page Table
pmlevels[2] = &TMPDIR(0); // PDIR
pmlevels[1] = &TMPDIRPTR(0); // PDPT
pmlevels[0] = &TMPMAPLVL4(0); // PML4
pmlevels[3] = (void*)MM_FRACTAL_BASE; // Page Table
pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&BITMASK(VIRT_BITS-12)]; // PDIR
pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&BITMASK(VIRT_BITS-21)]; // PDPT
pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&BITMASK(VIRT_BITS-30)]; // PML4
Addr &= (1ULL << 48)-1;
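// Walk from the PML4 (index bits 39+) down towards the PT (bits 12+),
// nine index bits per level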
for( size = 39, i = 0; size > 12; size -= 9, i ++ )
Uint64 *ent = &pmlevels[i][Addr >> size];
// INVLPG( &pmlevels[i][ (Addr >> ADDR_SIZES[i]) &
// Check for a free large page slot
// TODO: Better support with selectable levels
if( (Addr & ((1ULL << size)-1)) == 0 && bLargePage )
if(Pointer) *Pointer = ent;
// Allocate an entry if required
if( !(*ent & PF_PRESENT) )
if( !bAllocate ) return -4; // If allocation is not requested, error
if( !(tmp = MM_AllocPhys()) ) return -2;
if( Addr < 0x800000000000 )
INVLPG( &pmlevels[i+1][ (Addr>>size)*512 ] );
memset( &pmlevels[i+1][ (Addr>>size)*512 ], 0, 0x1000 );
LOG("Init PML%i ent 0x%x %p with %P (*ent = %P)", 4 - i,
Addr>>size, (Addr>>size) << size, tmp, *ent);
else if( *ent & PF_LARGE )
if( (Addr & ((1ULL << size)-1)) != 0 ) return -3;
if(Pointer) *Pointer = ent;
return size; // A large page is mapped here; the caller gets its size
// And, set the page table entry
if(Pointer) *Pointer = &pmlevels[i][Addr >> size];
/**
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
 * \param bTemp Use temporary mappings
 * \param bLarge Treat as a large page
 */
int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
ENTER("pVAddr PPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
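// Refuse to replace an existing mapping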
if( *ent & 1 ) LEAVE_RET('i', 0);
if( VAddr < 0x800000000000 )
/**
 * \brief Map a physical page to a virtual one
 * \param VAddr Target virtual address
 * \param PAddr Physical address of page
 */
int MM_Map(tVAddr VAddr, tPAddr PAddr)
return MM_MapEx(VAddr, PAddr, 0, 0);
/**
 * \brief Remove a mapped page
 */
void MM_Unmap(tVAddr VAddr)
if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
/**
 * \brief Allocate a block of memory at the specified virtual address
 */
tPAddr MM_Allocate(tVAddr VAddr)
ENTER("xVAddr", VAddr);
// Ensure the tables are allocated before the page (keeps things neat)
MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
ret = MM_AllocPhys();
LOG("ret = %x", ret);
if(!ret) LEAVE_RET('i', 0);
if( !MM_Map(VAddr, ret) )
Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
tPAddr MM_AllocateZero(tVAddr VAddr)
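// All zero-allocated pages share the single global zero page, mapped with
// COW set; the first write faults and MM_int_ClonePageEnt gives the writer
// a private copy.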
tPAddr ret = gMM_ZeroPage;
MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
ret = gMM_ZeroPage = MM_AllocPhys();
MM_RefPhys(ret); // Don't free this please
memset((void*)VAddr, 0, 0x1000);
MM_RefPhys(ret); // Reference for this map
MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
/**
 * \brief Deallocate a page at a virtual address
 */
void MM_Deallocate(tVAddr VAddr)
phys = MM_GetPhysAddr(VAddr);
/**
 * \brief Get the page table entry of a virtual address
 * \param Addr Virtual Address
 * \param Phys Location to put the physical address
 * \param Flags Flags on the entry (set to zero if unmapped)
 * \return Size of the entry (in address bits) - 12 = 4KiB page
 */
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
if(!Phys || !Flags) return 0;
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
*Phys = *ptr & PADDR_MASK;
*Flags = *ptr & 0xFFF;
/**
 * \brief Get the physical address of a virtual location
 */
tPAddr MM_GetPhysAddr(tVAddr Addr)
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
if( !(*ptr & 1) ) return 0;
return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
/**
 * \brief Sets the flags on a page
 */
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
// Ensure the entry is valid
if( !(*ent & 1) ) return ;
if( Mask & MM_PFLAG_RO )
if( Flags & MM_PFLAG_RO ) {
if( Mask & MM_PFLAG_KERNEL )
if( Flags & MM_PFLAG_KERNEL ) {
if( Mask & MM_PFLAG_COW )
if( Flags & MM_PFLAG_COW ) {
if( Mask & MM_PFLAG_EXEC )
if( Flags & MM_PFLAG_EXEC ) {
/**
 * \brief Get the flags applied to a page
 */
Uint MM_GetFlags(tVAddr VAddr)
rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
if( !(*ent & 1) ) return 0;
if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
/**
 * \brief Check if the provided buffer is valid
 * \return Boolean valid
 */
int MM_IsValidBuffer(tVAddr Addr, size_t Size)
Uint64 pml4, pdp, dir, tab;
Size += Addr & (PAGE_SIZE-1);
Addr &= ~(PAGE_SIZE-1);
Addr &= ((1UL << 48)-1); // Clamp to the 48-bit address space
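// Check all four levels for the first page; later pages only re-check the
// levels whose index rolls over a table boundary. A kernel/user mismatch
// part-way through also fails the buffer.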
if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
if( !(PAGEDIR(dir) & 1) ) return 0;
if( !(PAGETABLE(tab) & 1) ) return 0;
bIsUser = !!(PAGETABLE(tab) & PF_USER);
while( Size >= PAGE_SIZE )
if( (tab & 511) == 0 )
if( ((dir >> 9) & 511) == 0 )
if( ((pdp >> 18) & 511) == 0 )
if( !(PAGEMAPLVL4(pml4) & 1) ) return 0;
if( !(PAGEDIRPTR(pdp) & 1) ) return 0;
if( !(PAGEDIR(dir) & 1) ) return 0;
if( !(PAGETABLE(tab) & 1) ) return 0;
if( bIsUser && !(PAGETABLE(tab) & PF_USER) ) return 0;
// --- Hardware Mappings ---
/**
 * \brief Map a range of hardware pages
 */
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
//TODO: Add speedups (memory of first possible free)
for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
if( MM_GetPhysAddr(ret) != 0 ) break;
if( num >= 0 ) continue;
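// Found a run of Number free pages ending at ret; map it backwards from here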
// Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
PAddr += 0x1000 * Number;
Log_Error("MM", "MM_MapHWPages - No space for %i pages", Number);
/**
 * \brief Free a range of hardware pages
 */
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
// Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
MM_DerefPhys( MM_GetPhysAddr(VAddr) );
/**
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages Number of pages required
 * \param MaxBits Maximum number of bits the physical address can have
 * \param PhysAddr Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
 */
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
if(MaxBits < 12 || !PhysAddr) return 0;
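// Fast path: a single page with no address-width constraint can come from anywhere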
if(Pages == 1 && MaxBits >= PHYS_BITS)
phys = MM_AllocPhys();
ret = MM_MapHWPages(phys, 1);
phys = MM_AllocPhysRange(Pages, MaxBits);
// - Was it allocated?
if(phys == 0) return 0;
// Allocated successfully, now map
ret = MM_MapHWPages(phys, Pages);
// MapHWPages references the pages, so deref them back down to 1
for(;Pages--;phys+=0x1000)
// If it didn't map, free then return 0
// --- Temporary Mappings ---
void *MM_MapTemp(tPAddr PAddr)
const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
tVAddr ret = MM_TMPMAP_BASE;
for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
void MM_FreeTemp(void *Ptr)
MM_Deallocate((tVAddr)Ptr);
// --- Address Space Clone ---
tPAddr MM_Clone(void)
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
// #2 Alter the fractal pointer
Mutex_Acquire(&glMM_TempFractalLock);
// #3 Set Copy-On-Write to all user pages
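// The low 256 PML4 entries cover user space. PID 0 (the kernel process)
// skips the COW marking and keeps its mappings shared.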
if( Threads_GetPID() != 0 )
for( i = 0; i < 256; i ++)
if( PAGEMAPLVL4(i) & PF_WRITE ) {
PAGEMAPLVL4(i) |= PF_COW;
PAGEMAPLVL4(i) &= ~PF_WRITE;
TMPMAPLVL4(i) = PAGEMAPLVL4(i);
// Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
if( !(TMPMAPLVL4(i) & PF_PRESENT) ) continue ;
MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
for( i = 0; i < 256; i ++ )
// #4 Map in kernel pages
for( i = 256; i < 512; i ++ )
// 320 0xFFFFA.... - Kernel Stacks
if( i == MM_KSTACK_BASE>>39 ) continue;
// 509 0xFFFFFE0.. - Fractal mapping
if( i == MM_FRACTAL_BASE>>39 ) continue;
// 510 0xFFFFFE8.. - Temp fractal mapping
if( i == MM_TMPFRAC_BASE>>39 ) continue;
TMPMAPLVL4(i) = PAGEMAPLVL4(i);
if( TMPMAPLVL4(i) & 1 )
MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
// Mark Per-Process data as COW
TMPMAPLVL4(MM_PPD_BASE>>39) |= PF_COW;
TMPMAPLVL4(MM_PPD_BASE>>39) &= ~PF_WRITE;
// #5 Set fractal mapping
TMPMAPLVL4(MM_FRACTAL_BASE>>39) = ret | 3; // Main
TMPMAPLVL4(MM_TMPFRAC_BASE>>39) = 0; // Temp
// #6 Create kernel stack
// tThread->KernelStack is the top
// There is 1 guard page below the stack
kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
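// Copy the current kernel stack page by page into the new address space
// (page 0 of the range is the guard and stays unmapped)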
for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
tPAddr phys = MM_AllocPhys();
MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
tmpmapping = MM_MapTemp(phys);
if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
memcpy(tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
memset(tmpmapping, 0, 0x1000);
// Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
MM_FreeTemp(tmpmapping);
Mutex_Release(&glMM_TempFractalLock);
// Log("MM_Clone: RETURN %P", ret);
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts)
Uint64 * const table_bases[] = {&PAGETABLE(0), &PAGEDIR(0), &PAGEDIRPTR(0), &PAGEMAPLVL4(0)};
Uint64 *table = table_bases[(LevelBits-12)/9] + (VAddr >> LevelBits);
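// (LevelBits-12)/9 selects the fractal window for this level:
// 0 = page table, 1 = page directory, 2 = PDPT, 3 = PML4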
// Log("MM_int_ClearTableLevel: (VAddr=%p, LevelBits=%i, MaxEnts=%i)", VAddr, LevelBits, MaxEnts);
for( i = 0; i < MaxEnts; i ++ )
// Skip non-present tables
if( !(table[i] & PF_PRESENT) ) {
if( (table[i] & PF_COW) && MM_GetRefCount(table[i] & PADDR_MASK) > 1 ) {
MM_DerefPhys(table[i] & PADDR_MASK);
// Clear table contents (if it is a table)
if( LevelBits > 12 )
MM_int_ClearTableLevel(VAddr + ((tVAddr)i << LevelBits), LevelBits-9, 512);
MM_DerefPhys(table[i] & PADDR_MASK);
void MM_ClearUser(void)
MM_int_ClearTableLevel(0, 39, 256);
tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
// #1 Set temp fractal to PID0
Mutex_Acquire(&glMM_TempFractalLock);
TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
// #2 Scan for a free stack address < 2^47
for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) <= 0 ) break;
if( !(*ptr & 1) ) break;
if( ret >= (1ULL << 47) ) {
Mutex_Release(&glMM_TempFractalLock);
// #3 Map all save the last page in the range
// - This acts as a guard page
MM_GetPageEntryPtr(ret, 1, 1, 0, NULL); // Make sure tree is allocated
for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
phys = MM_AllocPhys();
Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
MM_MapEx(ret + i*0x1000, phys, 1, 0);
MM_SetFlags(ret + i*0x1000, MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
if( StackSize > 0x1000 ) {
Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000 is not supported", StackSize);
void *tmp_addr, *dest;
tmp_addr = MM_MapTemp(phys);
dest = (char*)tmp_addr + (0x1000 - StackSize);
memcpy( dest, StackData, StackSize );
Log_Debug("MM", "MM_NewWorkerStack: %p->%p %i bytes (i=%i)", StackData, dest, StackSize, i);
Log_Debug("MM", "MM_NewWorkerStack: ret = %p", ret);
MM_FreeTemp(tmp_addr);
Mutex_Release(&glMM_TempFractalLock);
return ret + i*0x1000;
/**
 * \brief Allocate a new kernel stack
 */
tVAddr MM_NewKStack(void)
tVAddr base = MM_KSTACK_BASE;
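// Scan the kernel stack region for a free slot; the lowest page of each
// stack is left unmapped as a guard, so the mapped top page indicates use.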
for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
if( !MM_Allocate(base+i) )
Log_Warning("MM", "MM_NewKStack - Allocation failed");
for( i -= 0x1000; i; i -= 0x1000)
MM_Deallocate(base+i);
return base + KERNEL_STACK_SIZE;
Log_Warning("MM", "MM_NewKStack - No address space left");