4 * Virtual Memory Manager
// Mask extracting the physical frame address from a paging entry
// (bits 12..50; bit 63 is NX, low 12 bits are flags)
17 #define PADDR_MASK 0x7FFFFFFF##FFFFF000
// Index masks for each level of the fractal (self-referencing) mapping:
// 36 bits of page index, 27 of page-table, 18 of PDP, 9 of PML4
18 #define PAGE_MASK (((Uint)1 << 36)-1)
19 #define TABLE_MASK (((Uint)1 << 27)-1)
20 #define PDP_MASK (((Uint)1 << 18)-1)
21 #define PML4_MASK (((Uint)1 << 9)-1)
// Page-entry flag bits (hardware-defined except where noted)
23 #define PF_PRESENT 0x1
// OS-available bit used to mark a swapped-out ("paged") entry
28 #define PF_PAGED 0x400
// No-Execute: bit 63 of the entry
29 #define PF_NX 0x80000000##00000000
// Fractal-mapping accessors: a PML4 self-reference at MM_FRACTAL_BASE
// exposes every paging level of the current address space as an array of
// tPAddr entries; each macro narrows the index to its level's mask.
32 #define PAGETABLE(idx) (*((tPAddr*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
33 #define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
34 #define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
35 #define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
// Invalidate the TLB entry covering one virtual address
37 #define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr));
// === PROTOTYPES ===
// (commented-out entries appear to be defined elsewhere or exported via a header)
40 void MM_InitVirt(void);
41 //void MM_FinishVirtualInit(void);
42 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
43 void MM_DumpTables(tVAddr Start, tVAddr End);
44 // int MM_Map(tVAddr VAddr, tPAddr PAddr);
45 void MM_Unmap(tVAddr VAddr);
46 void MM_ClearUser(void);
47 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// Initialise the virtual memory manager.
// Currently only dumps the boot-time page tables over the full 48-bit
// range (-1L wraps to the top of the address space) for debugging.
// NOTE(review): interior lines are elided in this extract.
52 void MM_InitVirt(void)
54 MM_DumpTables(0, -1L);
// Hook called once the physical memory manager has finished initialising.
// NOTE(review): body elided in this extract; appears to be a no-op stub.
57 void MM_FinishVirtualInit(void)
62 * \brief Called on a page fault
// Resolves Copy-on-Write faults in place; any other fault is logged
// (and, for user faults, handed to the thread layer).
// NOTE(review): the COW path indexes gaPageDir[Addr>>22] /
// gaPageTable[Addr>>12], which is the 32-bit (x86) two-level layout.
// In this x86_64 file one would expect the PAGEMAPLVL4/PAGEDIRPTR/
// PAGEDIR/PAGETABLE fractal macros -- confirm this path is correct here.
64 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
66 // TODO: Copy on Write
// COW fault: both levels present and the page is marked PF_COW
68 if( gaPageDir [Addr>>22] & PF_PRESENT
69 && gaPageTable[Addr>>12] & PF_PRESENT
70 && gaPageTable[Addr>>12] & PF_COW )
// Sole reference to the frame: just flip it back to writable
73 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
75 gaPageTable[Addr>>12] &= ~PF_COW;
76 gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
// Shared frame: duplicate it, drop our reference to the original,
// then install the private copy as writable
80 //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
81 paddr = MM_DuplicatePage( Addr );
82 MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
// Preserve only the user bit of the old entry before rebuilding it
83 gaPageTable[Addr>>12] &= PF_USER;
84 gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
// Flush the stale translation for the faulting page
87 INVLPG( Addr & ~0xFFF );
92 // If it was a user, tell the thread handler
// Error code bits: 1=protection (vs non-present), 2=write, 4=user, 16=instr fetch
94 Warning("%s %s %s memory%s",
95 (ErrorCode&4?"User":"Kernel"),
96 (ErrorCode&2?"write to":"read from"),
97 (ErrorCode&1?"bad/locked":"non-present"),
98 (ErrorCode&16?" (Instruction Fetch)":"")
100 Warning("User Pagefault: Instruction at %04x:%08x accessed %p",
101 Regs->CS, Regs->RIP, Addr);
102 __asm__ __volatile__ ("sti"); // Restart IRQs
103 // Threads_SegFault(Addr);
109 // -- Check Error Code --
// Reserved-bit fault: the page tables themselves are corrupt
111 Warning("Reserved Bits Trashed!");
114 Warning("%s %s %s memory%s",
115 (ErrorCode&4?"User":"Kernel"),
116 (ErrorCode&2?"write to":"read from"),
117 (ErrorCode&1?"bad/locked":"non-present"),
118 (ErrorCode&16?" (Instruction Fetch)":"")
122 Log("Code at %p accessed %p", Regs->RIP, Addr);
123 // Print Stack Backtrace
124 // Error_Backtrace(Regs->RIP, Regs->RBP);
126 MM_DumpTables(0, -1);
// Kernel fault is fatal: mask interrupts before halting
128 __asm__ __volatile__ ("cli");
134 * \brief Dumps the layout of the page tables
// Walks page-by-page from Start to End, coalescing runs whose physical
// addresses are consecutive and whose flags match (ignoring the
// CHANGEABLE_BITS such as accessed/dirty), and logs one line per run.
136 void MM_DumpTables(tVAddr Start, tVAddr End)
// Flag bits allowed to differ within a single logged run
138 const tPAddr CHANGEABLE_BITS = 0xFF8;
139 const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
140 tVAddr rangeStart = 0;
// Sentinel meaning "no run in progress": a masked entry can never equal
// CHANGEABLE_BITS alone
141 tPAddr expected = CHANGEABLE_BITS; // MASK is used because it's not a vaild value
145 Log("Table Entries: (%p to %p)", Start, End);
// Clamp End to the 48-bit virtual address space
147 End &= (1L << 48) - 1;
// Convert byte addresses to page numbers
149 Start >>= 12; End >>= 12;
151 for(page = Start, curPos = Start<<12;
153 curPos += 0x1000, page++)
// Jump over the non-canonical hole between the two halves
155 if( curPos == 0x800000000000L )
156 curPos = 0xFFFF800000000000L;
158 //Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
159 //Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
160 //Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
161 //Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
// The current run ends if any paging level is non-present or the
// entry no longer matches the expected (consecutive) value
165 !(PAGEMAPLVL4(page>>27) & PF_PRESENT)
166 || !(PAGEDIRPTR(page>>18) & PF_PRESENT)
167 || !(PAGEDIR(page>>9) & PF_PRESENT)
168 || !(PAGETABLE(page) & PF_PRESENT)
169 || (PAGETABLE(page) & MASK) != expected)
// Flush the pending run, if any
// NOTE(review): this format string ("%016x-0x%016x", %c flags) differs
// from the final flush below ("%016x-%016x", %s flags) -- likely an
// unintended inconsistency; confirm which form is wanted.
171 if(expected != CHANGEABLE_BITS) {
172 Log("%016x-0x%016x => %013x-%013x (%c%c%c%c)",
173 rangeStart, curPos - 1,
174 PAGETABLE(rangeStart>>12) & ~0xFFF,
175 (expected & ~0xFFF) - 1,
176 (expected & PF_PAGED ? 'p' : '-'),
177 (expected & PF_COW ? 'C' : '-'),
178 (expected & PF_USER ? 'U' : '-'),
179 (expected & PF_WRITE ? 'W' : '-')
181 expected = CHANGEABLE_BITS;
// Skip whole non-present regions at each level (512 GiB / 1 GiB / 2 MiB);
// the loop header's page++/curPos+=0x1000 supplies the final step
183 if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
184 page += (1 << 27) - 1;
185 curPos += (1L << 39) - 0x1000;
186 //Debug("pml4 ent unset (page = 0x%x now)", page);
189 if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
190 page += (1 << 18) - 1;
191 curPos += (1L << 30) - 0x1000;
192 //Debug("pdp ent unset (page = 0x%x now)", page);
195 if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
196 page += (1 << 9) - 1;
197 curPos += (1L << 21) - 0x1000;
198 //Debug("pd ent unset (page = 0x%x now)", page);
201 if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
// Start a new run at this entry
203 expected = (PAGETABLE(page) & MASK);
// Advance the expected physical address for a continuing run
206 if(expected != CHANGEABLE_BITS)
// Flush the final run after the loop
210 if(expected != CHANGEABLE_BITS) {
211 Log("%016x-%016x => %013x-%013x (%s%s%s%s)",
212 rangeStart, curPos - 1,
213 PAGETABLE(rangeStart>>12) & ~0xFFF,
214 (expected & ~0xFFF) - 1,
215 (expected & PF_PAGED ? "p" : "-"),
216 (expected & PF_COW ? "C" : "-"),
217 (expected & PF_USER ? "U" : "-"),
218 (expected & PF_WRITE ? "W" : "-")
225 * \brief Map a physical page to a virtual one
// Creates any missing intermediate tables (PDP/PD/PT) on the way down,
// then installs PAddr with flags Present|Writable (| 3).
// NOTE(review): error-handling lines (allocation failure, LEAVE/returns)
// are elided in this extract.
227 int MM_Map(tVAddr VAddr, tPAddr PAddr)
231 ENTER("xVAddr xPAddr", VAddr, PAddr);
233 // Check that the page hasn't been mapped already
237 ret = MM_GetPageEntry(VAddr, &tmp, &flags);
238 if( flags & PF_PRESENT ) {
// Allocate the PDP if the PML4 entry is empty; ((VAddr>>39)<<9) is the
// first PDP index covered by that PML4 entry, so the memset clears the
// whole new 4 KiB table through the fractal mapping
245 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
247 tmp = MM_AllocPhys();
252 PAGEMAPLVL4(VAddr >> 39) = tmp | 3;
253 INVLPG( &PAGEDIRPTR( (VAddr>>39)<<9 ) );
254 memset( &PAGEDIRPTR( (VAddr>>39)<<9 ), 0, 4096 );
// Same pattern one level down: allocate the page directory
258 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
260 tmp = MM_AllocPhys();
265 PAGEDIRPTR(VAddr >> 30) = tmp | 3;
266 INVLPG( &PAGEDIR( (VAddr>>30)<<9 ) );
267 memset( &PAGEDIR( (VAddr>>30)<<9 ), 0, 0x1000 );
// And the page table itself
271 if( !(PAGEDIR(VAddr >> 21) & 1) )
273 tmp = MM_AllocPhys();
278 PAGEDIR(VAddr >> 21) = tmp | 3;
279 INVLPG( &PAGETABLE( (VAddr>>21)<<9 ) );
280 memset( &PAGETABLE( (VAddr>>21)<<9 ), 0, 4096 );
283 // Check if this virtual address is already mapped
284 if( PAGETABLE(VAddr >> PTAB_SHIFT) & 1 ) {
// Install the final mapping: Present | Writable
289 PAGETABLE(VAddr >> PTAB_SHIFT) = PAddr | 3;
298 * \brief Removed a mapped page
300 void MM_Unmap(tVAddr VAddr)
303 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
305 if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
307 if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
309 PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
314 * \brief Allocate a block of memory at the specified virtual address
// Maps VAddr with a dummy physical address first (to force the
// intermediate tables into existence), then allocates the real frame and
// maps it. NOTE(review): returns and MM_Unmap/LEAVE lines are elided in
// this extract.
316 tPAddr MM_Allocate(tVAddr VAddr)
320 ENTER("xVAddr", VAddr);
322 // NOTE: This is hack, but I like my dumps to be neat
// First map with PAddr=0 purely to build the table hierarchy
324 if( !MM_Map(VAddr, 0) ) // Make sure things are allocated
326 Warning("MM_Allocate: Unable to map, tables did not initialise");
// Now allocate the real physical frame
333 ret = MM_AllocPhys();
334 LOG("ret = %x", ret);
// Tables exist already, so this should only fail on a logic error
340 if( !MM_Map(VAddr, ret) )
342 Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
// Free the physical frame backing VAddr and remove the mapping.
// NOTE(review): the unmap/deref lines are elided in this extract;
// presumably MM_Unmap + MM_DerefPhys(phys) follow -- confirm.
352 void MM_Deallocate(tVAddr VAddr)
356 phys = MM_GetPhysAddr(VAddr);
365 * \brief Get the page table entry of a virtual address
366 * \param Addr Virtual Address
367 * \param Phys Location to put the physical address
368 * \param Flags Flags on the entry (set to zero if unmapped)
369 * \return Size of the entry (in address bits) - 12 = 4KiB page
// Walks PML4 -> PDP -> PD -> PT, stopping early at a non-present entry
// or a large-page (PF_LARGE) mapping at any intermediate level.
// NOTE(review): returns and the not-present branch bodies are elided in
// this extract.
371 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
373 if(!Phys || !Flags) return 0;
375 // Check if the PML4 entry is present
376 if( !(PAGEMAPLVL4(Addr >> 39) & 1) ) {
381 // - Check for large page
382 if( PAGEMAPLVL4(Addr >> 39) & PF_LARGE ) {
383 *Phys = PAGEMAPLVL4(Addr >> 39) & ~0xFFF;
384 *Flags = PAGEMAPLVL4(Addr >> 39) & 0xFFF;
388 // Check the PDP entry
389 if( !(PAGEDIRPTR(Addr >> 30) & 1) ) {
394 // - Check for large page
395 if( PAGEDIRPTR(Addr >> 30) & PF_LARGE ) {
396 *Phys = PAGEDIRPTR(Addr >> 30) & ~0xFFF;
397 *Flags = PAGEDIRPTR(Addr >> 30) & 0xFFF;
// Check the page directory entry
402 if( !(PAGEDIR(Addr >> 21) & 1) ) {
407 // - Check for large page
408 if( PAGEDIR(Addr >> 21) & PF_LARGE ) {
409 *Phys = PAGEDIR(Addr >> 21) & ~0xFFF;
410 *Flags = PAGEDIR(Addr >> 21) & 0xFFF;
414 // And, check the page table entry
415 if( !(PAGETABLE(Addr >> PTAB_SHIFT) & 1) ) {
// 4 KiB page: split the entry into frame address and flag bits
420 *Phys = PAGETABLE(Addr >> PTAB_SHIFT) & ~0xFFF;
421 *Flags = PAGETABLE(Addr >> PTAB_SHIFT) & 0xFFF;
427 * \brief Get the physical address of a virtual location
// Combines the frame address from MM_GetPageEntry with the page offset.
// NOTE(review): correctness relies on MM_GetPageEntry setting *Phys to 0
// for unmapped addresses (its elided not-present branches) -- confirm,
// otherwise `ret` would be used uninitialised here.
429 tPAddr MM_GetPhysAddr(tVAddr Addr)
434 MM_GetPageEntry(Addr, &ret, &flags);
436 return ret | (Addr & 0xFFF);
440 * \brief Sets the flags on a page
// Translates the architecture-independent MM_PFLAG_* bits into hardware
// page-entry bits; only flags selected by Mask are modified. Returns
// silently if the address is not fully mapped.
// NOTE(review): the set/clear statement bodies and early returns are
// elided in this extract.
442 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
// Bail out if any paging level is missing
447 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
449 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
451 if( !(PAGEDIR(VAddr >> 21) & 1) )
453 if( !(PAGETABLE(VAddr >> 12) & 1) )
// Point at the live PTE through the fractal mapping
457 ent = &PAGETABLE(VAddr >> 12);
// Read-Only <-> clears/sets PF_WRITE
460 if( Mask & MM_PFLAG_RO )
462 if( Flags & MM_PFLAG_RO ) {
// Kernel-only <-> clears/sets PF_USER
471 if( Mask & MM_PFLAG_KERNEL )
473 if( Flags & MM_PFLAG_KERNEL ) {
// Copy-on-Write <-> PF_COW (OS-defined bit)
482 if( Mask & MM_PFLAG_COW )
484 if( Flags & MM_PFLAG_COW ) {
// Executable <-> clears/sets PF_NX (inverted sense)
495 if( Mask & MM_PFLAG_EXEC )
497 if( Flags & MM_PFLAG_EXEC ) {
507 * \brief Get the flags applied to a page
// Inverse of MM_SetFlags: reads the PTE and reports MM_PFLAG_* bits.
// NOTE(review): early-return bodies and the final return are elided in
// this extract; presumably returns 0 when unmapped.
509 Uint MM_GetFlags(tVAddr VAddr)
// Bail out if any paging level is missing
515 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
517 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
519 if( !(PAGEDIR(VAddr >> 21) & 1) )
521 if( !(PAGETABLE(VAddr >> 12) & 1) )
525 ent = &PAGETABLE(VAddr >> 12);
// Note the inverted senses: RO = !WRITE, KERNEL = !USER, EXEC = !NX
528 if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
530 if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
532 if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
534 if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
539 // --- Hardware Mappings ---
541 * \brief Map a range of hardware pages
// Unimplemented stub: panics if called.
543 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
545 Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
550 * \brief Free a range of hardware pages
// Unimplemented stub: panics if called.
552 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
554 Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
557 // --- Tempory Mappings ---
// Unimplemented stub: panics if called.
558 tVAddr MM_MapTemp(tPAddr PAddr)
560 Log_KernelPanic("MM", "TODO: Implement MM_MapTemp");
// Unimplemented stub: panics if called.
564 void MM_FreeTemp(tVAddr VAddr)
566 Log_KernelPanic("MM", "TODO: Implement MM_FreeTemp");
571 // --- Address Space Clone --
// Clone the current address space (for fork). Allocates the new PML4
// frame, then panics: steps #2/#3 are not yet implemented.
572 tPAddr MM_Clone(void)
576 // #1 Create a copy of the PML4
577 ret = MM_AllocPhys();
580 Log_KernelPanic("MM", "TODO: Implement MM_Clone");
582 // #2 Alter the fractal pointer
583 // #3 Set Copy-On-Write to all user pages
588 void MM_ClearUser(void)
591 // #1 Traverse the structure < 2^47, Deref'ing all pages
592 // #2 Free tables/dirs/pdps once they have been cleared
594 for( addr = 0; addr < 0x800000000000; )
596 if( PAGEMAPLVL4(addr >> PML4_SHIFT) & 1 )
598 if( PAGEDIRPTR(addr >> PDP_SHIFT) & 1 )
600 if( PAGEDIR(addr >> PDIR_SHIFT) & 1 )
603 if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
604 MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
605 PAGETABLE(addr >> PTAB_SHIFT) = 0;
607 addr += 1 << PTAB_SHIFT;
608 // Dereference the PDIR Entry
609 if( (addr + (1 << PTAB_SHIFT)) >> PDIR_SHIFT != (addr >> PDIR_SHIFT) ) {
610 MM_DerefPhys( PAGEMAPLVL4(addr >> PDIR_SHIFT) & PADDR_MASK );
611 PAGEDIR(addr >> PDIR_SHIFT) = 0;
615 addr += 1 << PDIR_SHIFT;
618 // Dereference the PDP Entry
619 if( (addr + (1 << PDIR_SHIFT)) >> PDP_SHIFT != (addr >> PDP_SHIFT) ) {
620 MM_DerefPhys( PAGEMAPLVL4(addr >> PDP_SHIFT) & PADDR_MASK );
621 PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
625 addr += 1 << PDP_SHIFT;
628 // Dereference the PML4 Entry
629 if( (addr + (1 << PDP_SHIFT)) >> PML4_SHIFT != (addr >> PML4_SHIFT) ) {
630 MM_DerefPhys( PAGEMAPLVL4(addr >> PML4_SHIFT) & PADDR_MASK );
631 PAGEMAPLVL4(addr >> PML4_SHIFT) = 0;
635 addr += (tVAddr)1 << PML4_SHIFT;
// Allocate a stack for a kernel worker thread.
// Unimplemented stub: panics if called.
641 tVAddr MM_NewWorkerStack(void)
643 Log_KernelPanic("MM", "TODO: Implement MM_NewWorkerStack");
648 * \brief Allocate a new kernel stack
650 tVAddr MM_NewKStack(void)
652 tVAddr base = MM_KSTACK_BASE;
654 for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
656 if(MM_GetPhysAddr(base) != 0)
659 //Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
660 for( i = 0; i < KERNEL_STACK_SIZE; i += 0x1000)
662 if( !MM_Allocate(base+i) )
664 Log_Warning("MM", "MM_NewKStack - Allocation failed");
665 for( i -= 0x1000; i; i -= 0x1000)
666 MM_Deallocate(base+i);
671 return base + KERNEL_STACK_SIZE;
673 Log_Warning("MM", "MM_NewKStack - No address space left\n");