4 * Virtual Memory Manager
17 #define PADDR_MASK 0x7FFFFFFF##FFFFF000
18 #define PAGE_MASK (((Uint)1 << 36)-1)
19 #define TABLE_MASK (((Uint)1 << 27)-1)
20 #define PDP_MASK (((Uint)1 << 18)-1)
21 #define PML4_MASK (((Uint)1 << 9)-1)
23 #define PF_PRESENT 0x1
28 #define PF_PAGED 0x400
29 #define PF_NX 0x80000000##00000000
32 #define PAGETABLE(idx) (*((tPAddr*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
33 #define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
34 #define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
35 #define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
37 #define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr));
40 void MM_InitVirt(void);
41 void MM_FinishVirtualInit(void);
42 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
43 void MM_DumpTables(tVAddr Start, tVAddr End);
44 int MM_Map(tVAddr VAddr, tPAddr PAddr);
45 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
/**
 * \brief First-stage virtual memory initialisation.
 * Currently only dumps the full page-table layout for debugging
 * (remaining body not visible in this view).
 */
50 void MM_InitVirt(void)
52 MM_DumpTables(0, -1L);
55 void MM_FinishVirtualInit(void)
/**
60 * \brief Called on a page fault
 * \param Addr      Faulting virtual address (CR2)
 * \param ErrorCode CPU error code (bit0=present, bit1=write, bit2=user,
 *                  bit3=reserved-bit, bit4=instruction fetch)
 * \param Regs      Saved register state at the fault
 */
62 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
64 // TODO: Copy on Write
// NOTE(review): these checks use 32-bit two-level tables (gaPageDir[Addr>>22],
// gaPageTable[Addr>>12]) while the rest of this file uses 4-level x86-64
// macros — looks like leftover ia32 code; confirm for the 64-bit port.
66 if( gaPageDir [Addr>>22] & PF_PRESENT
67 && gaPageTable[Addr>>12] & PF_PRESENT
68 && gaPageTable[Addr>>12] & PF_COW )
// Sole reference to the frame: no copy needed, just clear COW and allow writes.
71 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
73 gaPageTable[Addr>>12] &= ~PF_COW;
74 gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
// Shared frame: duplicate the page, drop one reference on the old frame,
// then install the private copy.
78 //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
79 paddr = MM_DuplicatePage( Addr );
80 MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
// Keep only the USER bit of the old entry; everything else is replaced below.
81 gaPageTable[Addr>>12] &= PF_USER;
82 gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
// Flush the stale TLB entry for this page.
85 INVLPG( Addr & ~0xFFF );
90 // If it was a user, tell the thread handler
92 Warning("%s %s %s memory%s",
93 (ErrorCode&4?"User":"Kernel"),
94 (ErrorCode&2?"write to":"read from"),
95 (ErrorCode&1?"bad/locked":"non-present"),
96 (ErrorCode&16?" (Instruction Fetch)":"")
98 Warning("User Pagefault: Instruction at %04x:%08x accessed %p",
99 Regs->CS, Regs->RIP, Addr);
100 __asm__ __volatile__ ("sti"); // Restart IRQs
101 // Threads_SegFault(Addr);
107 // -- Check Error Code --
// Error-code bit 3: reserved bits were set in a paging-structure entry.
109 Warning("Reserved Bits Trashed!");
112 Warning("%s %s %s memory%s",
113 (ErrorCode&4?"User":"Kernel"),
114 (ErrorCode&2?"write to":"read from"),
115 (ErrorCode&1?"bad/locked":"non-present"),
116 (ErrorCode&16?" (Instruction Fetch)":"")
120 Log("Code at %p accessed %p", Regs->RIP, Addr);
121 // Print Stack Backtrace
122 // Error_Backtrace(Regs->RIP, Regs->RBP);
124 MM_DumpTables(0, -1);
// Kernel-mode fault is fatal: mask interrupts (halt presumably follows,
// not visible in this view).
126 __asm__ __volatile__ ("cli");
/**
132 * \brief Dumps the layout of the page tables
 * \param Start First virtual address to include
 * \param End   Last virtual address (pass -1 for the entire address space)
 * Coalesces runs of pages whose entries differ only in "changeable" bits
 * into single output lines.
 */
134 void MM_DumpTables(tVAddr Start, tVAddr End)
// Bits allowed to vary within one contiguous run (accessed/dirty/etc.)
136 const tPAddr CHANGEABLE_BITS = 0xFF8;
137 const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
138 tVAddr rangeStart = 0;
139 tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS can never equal a masked entry, so it acts as the "no run open" sentinel
143 Log("Table Entries: (%p to %p)", Start, End);
// Clamp to the 48-bit canonical range, then work in page numbers.
145 End &= (1L << 48) - 1;
147 Start >>= 12; End >>= 12;
149 for(page = Start, curPos = Start<<12;
151 curPos += 0x1000, page++)
// Skip the non-canonical hole between the user and kernel halves.
153 if( curPos == 0x800000000000L )
154 curPos = 0xFFFF800000000000L;
156 //Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
157 //Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
158 //Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
159 //Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
// Run break: any paging level absent, or the masked entry no longer matches
// the expected (incrementing) value for the current run.
163 !(PAGEMAPLVL4(page>>27) & PF_PRESENT)
164 || !(PAGEDIRPTR(page>>18) & PF_PRESENT)
165 || !(PAGEDIR(page>>9) & PF_PRESENT)
166 || !(PAGETABLE(page) & PF_PRESENT)
167 || (PAGETABLE(page) & MASK) != expected)
// Emit the run that just ended (if one was open).
169 if(expected != CHANGEABLE_BITS) {
170 Log("%016x-0x%016x => %013x-%013x (%c%c%c%c)",
171 rangeStart, curPos - 1,
172 PAGETABLE(rangeStart>>12) & ~0xFFF,
173 (expected & ~0xFFF) - 1,
174 (expected & PF_PAGED ? 'p' : '-'),
175 (expected & PF_COW ? 'C' : '-'),
176 (expected & PF_USER ? 'U' : '-'),
177 (expected & PF_WRITE ? 'W' : '-')
179 expected = CHANGEABLE_BITS;
// Fast-forward over absent upper-level entries (512 entries per level).
181 if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
182 page += (1 << 27) - 1;
183 curPos += (1L << 39) - 0x1000;
184 //Debug("pml4 ent unset (page = 0x%x now)", page);
187 if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
188 page += (1 << 18) - 1;
189 curPos += (1L << 30) - 0x1000;
190 //Debug("pdp ent unset (page = 0x%x now)", page);
193 if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
194 page += (1 << 9) - 1;
195 curPos += (1L << 21) - 0x1000;
196 //Debug("pd ent unset (page = 0x%x now)", page);
199 if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
// Start a new run from this page's masked entry value.
201 expected = (PAGETABLE(page) & MASK);
204 if(expected != CHANGEABLE_BITS)
// Flush the final run after the loop ends.
208 if(expected != CHANGEABLE_BITS) {
209 Log("%016x-%016x => %013x-%013x (%s%s%s%s)",
210 rangeStart, curPos - 1,
211 PAGETABLE(rangeStart>>12) & ~0xFFF,
212 (expected & ~0xFFF) - 1,
213 (expected & PF_PAGED ? "p" : "-"),
214 (expected & PF_COW ? "C" : "-"),
215 (expected & PF_USER ? "U" : "-"),
216 (expected & PF_WRITE ? "W" : "-")
/**
223 * \brief Map a physical page to a virtual one
 * \param VAddr Virtual address to map at (4 KiB aligned)
 * \param PAddr Physical frame to map
 * \return Boolean success (0 on failure — already mapped or out of frames;
 *         return statements not visible in this view, TODO confirm)
 * Allocates any missing PDPT/PD/PT levels on the way down.
 */
225 int MM_Map(tVAddr VAddr, tPAddr PAddr)
229 ENTER("xVAddr xPAddr", VAddr, PAddr);
231 // Check that the page hasn't been mapped already
235 ret = MM_GetPageEntry(VAddr, &tmp, &flags);
236 if( flags & PF_PRESENT ) {
// Ensure the PML4 entry exists; allocate and zero a fresh PDPT if not.
243 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
245 tmp = MM_AllocPhys();
// "| 3" = Present | Writable
250 PAGEMAPLVL4(VAddr >> 39) = tmp | 3;
// Flush the fractal mapping of the new table before zeroing it.
251 INVLPG( &PAGEDIRPTR( (VAddr>>39)<<9 ) );
252 memset( &PAGEDIRPTR( (VAddr>>39)<<9 ), 0, 4096 );
// Ensure the PDPT entry exists; allocate and zero a fresh page directory.
256 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
258 tmp = MM_AllocPhys();
263 PAGEDIRPTR(VAddr >> 30) = tmp | 3;
264 INVLPG( &PAGEDIR( (VAddr>>30)<<9 ) );
265 memset( &PAGEDIR( (VAddr>>30)<<9 ), 0, 0x1000 );
// Ensure the PD entry exists; allocate and zero a fresh page table.
269 if( !(PAGEDIR(VAddr >> 21) & 1) )
271 tmp = MM_AllocPhys();
276 PAGEDIR(VAddr >> 21) = tmp | 3;
277 INVLPG( &PAGETABLE( (VAddr>>21)<<9 ) );
278 memset( &PAGETABLE( (VAddr>>21)<<9 ), 0, 4096 );
281 // Check if this virtual address is already mapped
282 if( PAGETABLE(VAddr >> PTAB_SHIFT) & 1 ) {
// Install the final PTE (Present | Writable).
287 PAGETABLE(VAddr >> PTAB_SHIFT) = PAddr | 3;
/**
296 * \brief Removed a mapped page
 * \param VAddr Virtual address to unmap; silently returns if any
 *              paging level on the path is not present.
 */
298 void MM_Unmap(tVAddr VAddr)
// Walk PML4 -> PDPT -> PD, bailing out on any absent level.
301 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
303 if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
305 if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
// Clear the PTE. NOTE(review): no INVLPG visible here — a stale TLB entry
// may survive; confirm a flush happens in the (unseen) remainder or callers.
307 PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
/**
312 * \brief Allocate a block of memory at the specified virtual address
 * \param VAddr Virtual address to allocate at
 * \return Physical frame allocated (0 on failure — return statements
 *         not visible in this view, TODO confirm)
 */
314 tPAddr MM_Allocate(tVAddr VAddr)
318 ENTER("xVAddr", VAddr);
320 // NOTE: This is hack, but I like my dumps to be neat
// First map frame 0 purely to force the intermediate tables to exist.
322 if( !MM_Map(VAddr, 0) ) // Make sure things are allocated
324 Warning("MM_Allocate: Unable to map, tables did not initialise");
// Now grab a real frame and map it for real.
331 ret = MM_AllocPhys();
332 LOG("ret = %x", ret);
338 if( !MM_Map(VAddr, ret) )
340 Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
/**
 * \brief Free the page mapped at a virtual address
 * (unmap + physical deref presumably follow; not visible in this view).
 */
350 void MM_Deallocate(tVAddr VAddr)
354 phys = MM_GetPhysAddr(VAddr);
/**
363 * \brief Get the page table entry of a virtual address
364 * \param Addr Virtual Address
365 * \param Phys Location to put the physical address
366 * \param Flags Flags on the entry (set to zero if unmapped)
367 * \return Size of the entry (in address bits) - 12 = 4KiB page
 * Walks PML4 -> PDPT -> PD -> PT, stopping early at large pages.
 */
369 int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags)
371 if(!Phys || !Flags) return 0;
373 // Check if the PML4 entry is present
374 if( !(PAGEMAPLVL4(Addr >> 39) & 1) ) {
379 // - Check for large page
380 if( PAGEMAPLVL4(Addr >> 39) & PF_LARGE ) {
381 *Phys = PAGEMAPLVL4(Addr >> 39) & ~0xFFF;
382 *Flags = PAGEMAPLVL4(Addr >> 39) & 0xFFF;
386 // Check the PDP entry
387 if( !(PAGEDIRPTR(Addr >> 30) & 1) ) {
392 // - Check for large page (1 GiB)
393 if( PAGEDIRPTR(Addr >> 30) & PF_LARGE ) {
394 *Phys = PAGEDIRPTR(Addr >> 30) & ~0xFFF;
395 *Flags = PAGEDIRPTR(Addr >> 30) & 0xFFF;
// Check the page-directory entry
400 if( !(PAGEDIR(Addr >> 21) & 1) ) {
405 // - Check for large page (2 MiB)
406 if( PAGEDIR(Addr >> 21) & PF_LARGE ) {
407 *Phys = PAGEDIR(Addr >> 21) & ~0xFFF;
408 *Flags = PAGEDIR(Addr >> 21) & 0xFFF;
412 // And, check the page table entry
413 if( !(PAGETABLE(Addr >> PTAB_SHIFT) & 1) ) {
// 4 KiB mapping: split the PTE into frame address and flag bits.
418 *Phys = PAGETABLE(Addr >> PTAB_SHIFT) & ~0xFFF;
419 *Flags = PAGETABLE(Addr >> PTAB_SHIFT) & 0xFFF;
/**
425 * \brief Get the physical address of a virtual location
 * \param Addr Virtual address to translate
 * \return Physical address (frame base OR'd with the page offset)
 */
427 tPAddr MM_GetPhysAddr(tVAddr Addr)
432 MM_GetPageEntry(Addr, &ret, &flags);
// Combine the frame base with the low 12 offset bits of the address.
434 return ret | (Addr & 0xFFF);
/**
438 * \brief Sets the flags on a page
 * \param VAddr Virtual address of the page
 * \param Flags New flag values (MM_PFLAG_* bits)
 * \param Mask  Which MM_PFLAG_* bits of \a Flags to apply
 * Silently returns if any paging level on the path is not present.
 */
440 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
// Ensure the full translation path exists before touching the PTE.
445 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
447 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
449 if( !(PAGEDIR(VAddr >> 21) & 1) )
451 if( !(PAGETABLE(VAddr >> 12) & 1) )
// Work directly on the PTE via the fractal mapping.
455 ent = &PAGETABLE(VAddr >> 12);
// Read-only flag (controls PF_WRITE; bodies not fully visible here)
458 if( Mask & MM_PFLAG_RO )
460 if( Flags & MM_PFLAG_RO ) {
// Kernel-only flag (controls PF_USER)
469 if( Mask & MM_PFLAG_KERNEL )
471 if( Flags & MM_PFLAG_KERNEL ) {
// Copy-on-write flag (controls PF_COW)
480 if( Mask & MM_PFLAG_COW )
482 if( Flags & MM_PFLAG_COW ) {
// Executable flag (controls PF_NX)
493 if( Mask & MM_PFLAG_EXEC )
495 if( Flags & MM_PFLAG_EXEC ) {
/**
505 * \brief Get the flags applied to a page
 * \param VAddr Virtual address to query
 * \return MM_PFLAG_* bits derived from the PTE (return statements for the
 *         unmapped cases not visible in this view)
 */
507 Uint MM_GetFlags(tVAddr VAddr)
// Bail out if any paging level on the path is not present.
513 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
515 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
517 if( !(PAGEDIR(VAddr >> 21) & 1) )
519 if( !(PAGETABLE(VAddr >> 12) & 1) )
523 ent = &PAGETABLE(VAddr >> 12);
// Translate hardware PTE bits into the arch-neutral MM_PFLAG_* set.
// Note RO/KERNEL/EXEC are the *inverse* of the hardware bits.
526 if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
528 if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
530 if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
532 if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
537 // --- Hardware Mappings ---
/**
539 * \brief Map a range of hardware pages
 * \param PAddr  Base physical address to map
 * \param Number Number of pages
 * \return Virtual address of the mapping — currently unimplemented (panics).
 */
541 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
543 Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
/**
548 * \brief Free a range of hardware pages
 * \param VAddr  Base virtual address of the mapping
 * \param Number Number of pages — currently unimplemented (panics).
 */
550 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
552 Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
555 // --- Tempory Mappings ---
/**
 * \brief Create a temporary mapping of a physical page.
 * Currently unimplemented (panics).
 */
556 tVAddr MM_MapTemp(tPAddr PAddr)
558 Log_KernelPanic("MM", "TODO: Implement MM_MapTemp");
/**
 * \brief Release a temporary mapping made by MM_MapTemp.
 * Currently unimplemented (panics).
 */
562 void MM_FreeTemp(tVAddr VAddr)
564 Log_KernelPanic("MM", "TODO: Implement MM_FreeTemp");
569 // --- Address Space Clone --
/**
 * \brief Clone the current address space (for process fork).
 * \return Physical address of the new PML4 — incomplete, panics after
 *         allocating the PML4 copy.
 */
570 tPAddr MM_Clone(void)
574 // #1 Create a copy of the PML4
575 ret = MM_AllocPhys();
578 Log_KernelPanic("MM", "TODO: Implement MM_Clone");
580 // #2 Alter the fractal pointer
581 // #3 Set Copy-On-Write to all user pages
/**
 * \brief Clear (deallocate) the entire user half of the address space.
 * Walks all mappings below 2^47, dereferencing every mapped frame and
 * freeing page tables / directories / PDPTs once emptied.
 *
 * BUGFIX: the PD- and PDP-level dereferences previously read
 * PAGEMAPLVL4(addr >> PDIR_SHIFT) / PAGEMAPLVL4(addr >> PDP_SHIFT) —
 * indexing the PML4 with a PD/PDP-scale index — while zeroing the
 * PAGEDIR / PAGEDIRPTR entry. The frame dereferenced must be the one
 * held in the entry being cleared: PAGEDIR(...) and PAGEDIRPTR(...).
 */
586 void MM_ClearUser(void)
589 // #1 Traverse the structure < 2^47, Deref'ing all pages
590 // #2 Free tables/dirs/pdps once they have been cleared
592 for( addr = 0; addr < 0x800000000000; )
594 if( PAGEMAPLVL4(addr >> PML4_SHIFT) & 1 )
596 if( PAGEDIRPTR(addr >> PDP_SHIFT) & 1 )
598 if( PAGEDIR(addr >> PDIR_SHIFT) & 1 )
// Drop the data page referenced by this PTE, then clear it.
601 if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
602 MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
603 PAGETABLE(addr >> PTAB_SHIFT) = 0;
605 addr += 1 << PTAB_SHIFT;
606 // Dereference the PDIR Entry
607 if( (addr + (1 << PTAB_SHIFT)) >> PDIR_SHIFT != (addr >> PDIR_SHIFT) ) {
608 MM_DerefPhys( PAGEDIR(addr >> PDIR_SHIFT) & PADDR_MASK );
609 PAGEDIR(addr >> PDIR_SHIFT) = 0;
613 addr += 1 << PDIR_SHIFT;
616 // Dereference the PDP Entry
617 if( (addr + (1 << PDIR_SHIFT)) >> PDP_SHIFT != (addr >> PDP_SHIFT) ) {
618 MM_DerefPhys( PAGEDIRPTR(addr >> PDP_SHIFT) & PADDR_MASK );
619 PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
623 addr += 1 << PDP_SHIFT;
626 // Dereference the PML4 Entry
627 if( (addr + (1 << PDP_SHIFT)) >> PML4_SHIFT != (addr >> PML4_SHIFT) ) {
628 MM_DerefPhys( PAGEMAPLVL4(addr >> PML4_SHIFT) & PADDR_MASK );
629 PAGEMAPLVL4(addr >> PML4_SHIFT) = 0;
// Cast needed: 1 << 39 overflows a plain int.
633 addr += (tVAddr)1 << PML4_SHIFT;
/**
 * \brief Allocate a stack for a kernel worker thread.
 * Currently unimplemented (panics).
 */
639 tVAddr MM_NewWorkerStack(void)
641 Log_KernelPanic("MM", "TODO: Implement MM_NewWorkerStack");
/**
646 * \brief Allocate a new kernel stack
 * \return Address of the stack TOP (base + KERNEL_STACK_SIZE),
 *         or a failure value when the kernel-stack region is exhausted
 *         (failure return not visible in this view; TODO confirm).
 * Scans the kernel-stack region for a free slot, then maps pages into it.
 */
648 tVAddr MM_NewKStack(void)
650 tVAddr base = MM_KSTACK_BASE;
652 for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
// Slot in use if its first page is already mapped.
654 if(MM_GetPhysAddr(base) != 0)
657 //Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
// Map every page of the stack (allocation calls not visible in this view).
658 for( i = 0; i < KERNEL_STACK_SIZE; i += 0x1000)
661 return base + KERNEL_STACK_SIZE;
663 Log_Warning("MM", "MM_NewKStack - No address space left\n");