4 * Virtual Memory Manager
/* NOTE(review): this view of the file is a lossy extraction — each line
 * begins with the original file's line number fused into the text, and
 * many intermediate lines are missing entirely.  Comments below describe
 * only what the visible lines establish. */
// Physical-frame bits of a 64-bit PTE (bits 12..62; low 12 bits are
// flags, bit 63 is NX).  The '##' token-paste splices the two literal
// halves into 0x7FFFFFFFFFFFF000.
17 #define PADDR_MASK 0x7FFFFFFF##FFFFF000
// Index masks for the fractal page-table window below: a 48-bit address
// space holds 2^36 4KiB pages, 2^27 page tables, 2^18 page directories
// and 2^9 PML4 entries.
18 #define PAGE_MASK (((Uint)1 << 36)-1)
19 #define TABLE_MASK (((Uint)1 << 27)-1)
20 #define PDP_MASK (((Uint)1 << 18)-1)
21 #define PML4_MASK (((Uint)1 << 9)-1)
// Hardware PTE flag: entry is present
23 #define PF_PRESENT 0x1
// Software flag in an OS-available PTE bit (bit 10) — presumably marks a
// swapped/demand-paged entry; TODO confirm against the missing #defines
27 #define PF_PAGED 0x400
// No-Execute — bit 63 of a PTE (assembled with '##' like PADDR_MASK)
28 #define PF_NX 0x80000000##00000000
// Fractal (self-referencing PML4) table access: MM_FRACTAL_BASE is the
// virtual window through which all paging structures are visible; each
// macro folds its index through the previous level's window.
// (PF_WRITE / PF_USER / PF_COW are used later in this file but their
// #defines fall on lines missing from this view.)
31 #define PAGETABLE(idx) (*((tPAddr*)MM_FRACTAL_BASE+((idx)&PAGE_MASK)))
32 #define PAGEDIR(idx) PAGETABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
33 #define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
34 #define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
/**
 * \brief Invalidate the TLB entry covering one virtual address.
 * Wrapped in do{}while(0) so the macro expands to exactly one statement:
 * the original definition carried a trailing ';' inside the macro body,
 * which made every `INVLPG(x);` call site expand to a stray empty
 * statement and broke `if (cond) INVLPG(x); else ...`.
 */
#define INVLPG(__addr) do { __asm__ __volatile__ ("invlpg (%0)" : : "r" (__addr)); } while(0)
// === PROTOTYPES ===
39 void MM_InitVirt(void);
40 void MM_FinishVirtualInit(void);
41 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
42 void MM_DumpTables(tVAddr Start, tVAddr End);
43 int MM_Map(tVAddr VAddr, tPAddr PAddr);
// === CODE ===
/**
 * \brief Early virtual-memory initialisation.
 * The only visible action is dumping the full address range (0..-1);
 * any other body lines fall outside this view.
 */
48 void MM_InitVirt(void)
50 MM_DumpTables(0, -1L);
/**
 * \brief Hook called when the rest of kernel VM setup has completed.
 * Body is not visible in this view — presumably empty; TODO confirm.
 */
53 void MM_FinishVirtualInit(void)
58 * \brief Called on a page fault
// Handles copy-on-write faults in-line; everything else is logged and
// (for kernel faults) the CPU is halted with interrupts disabled.
60 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
62 // TODO: Copy on Write
// NOTE(review): gaPageDir[Addr>>22] / gaPageTable[Addr>>12] is 32-bit
// two-level paging indexing (4MiB directories), yet this file is the
// x86-64 manager using PML4 macros elsewhere — looks like code carried
// over from the x86 port; verify these arrays exist in this build.
64 if( gaPageDir [Addr>>22] & PF_PRESENT
65 && gaPageTable[Addr>>12] & PF_PRESENT
66 && gaPageTable[Addr>>12] & PF_COW )
// Sole reference: no copy needed, just drop COW and restore write access
69 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
71 gaPageTable[Addr>>12] &= ~PF_COW;
72 gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
// Shared frame: duplicate it, drop our reference on the old frame,
// then install the copy (keeping only the USER bit of the old entry
// before OR-ing in the new frame address and flags).
76 //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
77 paddr = MM_DuplicatePage( Addr );
78 MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
79 gaPageTable[Addr>>12] &= PF_USER;
80 gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
// Flush the stale (read-only) translation for this page
83 INVLPG( Addr & ~0xFFF );
88 // If it was a user, tell the thread handler
// x86 page-fault error code bits: 1=P(rotection), 2=Write, 4=User,
// 16=Instruction Fetch — decoded into a human-readable message.
90 Warning("%s %s %s memory%s",
91 (ErrorCode&4?"User":"Kernel"),
92 (ErrorCode&2?"write to":"read from"),
93 (ErrorCode&1?"bad/locked":"non-present"),
94 (ErrorCode&16?" (Instruction Fetch)":"")
// NOTE(review): %08x for a 64-bit RIP likely truncates the upper half —
// verify Warning()'s format semantics.
96 Warning("User Pagefault: Instruction at %04x:%08x accessed %p",
97 Regs->CS, Regs->RIP, Addr);
98 __asm__ __volatile__ ("sti"); // Restart IRQs
99 // Threads_SegFault(Addr);
105 // -- Check Error Code --
// Reserved-bit violation (error code bit 3) — paging structures corrupt
107 Warning("Reserved Bits Trashed!");
110 Warning("%s %s %s memory%s",
111 (ErrorCode&4?"User":"Kernel"),
112 (ErrorCode&2?"write to":"read from"),
113 (ErrorCode&1?"bad/locked":"non-present"),
114 (ErrorCode&16?" (Instruction Fetch)":"")
118 Log("Code at %p accessed %p", Regs->RIP, Addr);
119 // Print Stack Backtrace
120 // Error_Backtrace(Regs->RIP, Regs->RBP);
122 MM_DumpTables(0, -1);
// Kernel fault is fatal: mask interrupts (halt loop presumably follows
// on lines not visible here)
124 __asm__ __volatile__ ("cli");
130 * \brief Dumps the layout of the page tables
// Walks [Start,End) page by page, coalescing runs of pages whose
// physical mapping and flag bits advance contiguously, and logs one
// line per run.  Non-present upper-level entries let the walk skip
// whole 2MiB/1GiB/512GiB regions at once.
132 void MM_DumpTables(tVAddr Start, tVAddr End)
// Bits allowed to differ within one run (accessed/dirty etc. + low
// address bits of the expected-next-frame value)
134 const tPAddr CHANGEABLE_BITS = 0xFF8;
135 const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
136 tVAddr rangeStart = 0;
// Sentinel meaning "no run in progress".
// NOTE(review): the trailing comment says "MASK is used" but the code
// assigns CHANGEABLE_BITS — comment and code disagree; confirm which
// sentinel was intended (both compare against `expected` below).
137 tPAddr expected = CHANGEABLE_BITS; // MASK is used because it's not a vaild value
141 Log("Table Entries: (%p to %p)", Start, End);
// Clamp End to the 48-bit canonical range, then convert to page numbers
143 End &= (1L << 48) - 1;
145 Start >>= 12; End >>= 12;
147 for(page = Start, curPos = Start<<12;
149 curPos += 0x1000, page++)
// Skip the non-canonical hole between user and kernel halves
151 if( curPos == 0x800000000000L )
152 curPos = 0xFFFF800000000000L;
154 //Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
155 //Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
156 //Debug("&PAGEDIR(%i page>>9) = %p", page>>9, &PAGEDIR(page>>9));
157 //Debug("&PAGETABLE(%i page) = %p", page, &PAGETABLE(page));
// End the current run when any level is absent or the PTE no longer
// matches the predicted (contiguous) value
161 !(PAGEMAPLVL4(page>>27) & PF_PRESENT)
162 || !(PAGEDIRPTR(page>>18) & PF_PRESENT)
163 || !(PAGEDIR(page>>9) & PF_PRESENT)
164 || !(PAGETABLE(page) & PF_PRESENT)
165 || (PAGETABLE(page) & MASK) != expected)
// Flush the pending run, if any
// NOTE(review): format is "%016x-0x%016x" here but "%016x-%016x" in the
// epilogue below — the stray "0x" looks unintentional
167 if(expected != CHANGEABLE_BITS) {
168 Log("%016x-0x%016x => %013x-%013x (%c%c%c%c)",
169 rangeStart, curPos - 1,
170 PAGETABLE(rangeStart>>12) & ~0xFFF,
171 (expected & ~0xFFF) - 1,
172 (expected & PF_PAGED ? 'p' : '-'),
173 (expected & PF_COW ? 'C' : '-'),
174 (expected & PF_USER ? 'U' : '-'),
175 (expected & PF_WRITE ? 'W' : '-')
177 expected = CHANGEABLE_BITS;
// Skip the remainder of any non-present upper-level region
// (page/curPos land on the last page of the region; the for-loop
// increment then advances past it)
179 if( !(PAGEMAPLVL4(page>>27) & PF_PRESENT) ) {
180 page += (1 << 27) - 1;
181 curPos += (1L << 39) - 0x1000;
182 //Debug("pml4 ent unset (page = 0x%x now)", page);
185 if( !(PAGEDIRPTR(page>>18) & PF_PRESENT) ) {
186 page += (1 << 18) - 1;
187 curPos += (1L << 30) - 0x1000;
188 //Debug("pdp ent unset (page = 0x%x now)", page);
191 if( !(PAGEDIR(page>>9) & PF_PRESENT) ) {
192 page += (1 << 9) - 1;
193 curPos += (1L << 21) - 0x1000;
194 //Debug("pd ent unset (page = 0x%x now)", page);
197 if( !(PAGETABLE(page) & PF_PRESENT) ) continue;
// Start a new run from this page
199 expected = (PAGETABLE(page) & MASK);
202 if(expected != CHANGEABLE_BITS)
// Flush the final run after the loop
206 if(expected != CHANGEABLE_BITS) {
207 Log("%016x-%016x => %013x-%013x (%s%s%s%s)",
208 rangeStart, curPos - 1,
209 PAGETABLE(rangeStart>>12) & ~0xFFF,
210 (expected & ~0xFFF) - 1,
211 (expected & PF_PAGED ? "p" : "-"),
212 (expected & PF_COW ? "C" : "-"),
213 (expected & PF_USER ? "U" : "-"),
214 (expected & PF_WRITE ? "W" : "-")
221 * \brief Map a physical page to a virtual one
// Allocates any missing PML4/PDPT/PD levels on the way down (each new
// table is zeroed through the fractal window), then installs the PTE.
// Entries are created with flags 0x3 = Present|Writable.
// Visible return paths are missing from this view; the prototype says
// int — presumably 0 on failure, non-zero on success (TODO confirm).
223 int MM_Map(tVAddr VAddr, tPAddr PAddr)
227 ENTER("xVAddr xPAddr", VAddr, PAddr);
230 //Log(" MM_Map: &PAGEMAPLVL4(%x) = %x", VAddr >> 39, &PAGEMAPLVL4(VAddr >> 39));
231 //Log(" MM_Map: &PAGEDIRPTR(%x) = %x", VAddr >> 30, &PAGEDIRPTR(VAddr >> 30));
232 //Log(" MM_Map: &PAGEDIR(%x) = %x", VAddr >> 21, &PAGEDIR(VAddr >> 21));
233 //Log(" MM_Map: &PAGETABLE(%x) = %x", VAddr >> 12, &PAGETABLE(VAddr >> 12));
234 //Log(" MM_Map: &PAGETABLE(0) = %x", &PAGETABLE(0));
// Ensure the PML4 entry exists; (VAddr>>39)<<9 is the first PDPT index
// covered by the new table, i.e. the base of its fractal window
235 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
237 tmp = MM_AllocPhys();
239 PAGEMAPLVL4(VAddr >> 39) = tmp | 3;
240 INVLPG( &PAGEDIRPTR( (VAddr>>39)<<9 ) );
241 memset( &PAGEDIRPTR( (VAddr>>39)<<9 ), 0, 4096 );
// Ensure the PDPT entry exists
245 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
247 tmp = MM_AllocPhys();
249 PAGEDIRPTR(VAddr >> 30) = tmp | 3;
250 INVLPG( &PAGEDIR( (VAddr>>30)<<9 ) );
251 memset( &PAGEDIR( (VAddr>>30)<<9 ), 0, 0x1000 );
// Ensure the page-directory entry exists
255 if( !(PAGEDIR(VAddr >> 21) & 1) )
257 tmp = MM_AllocPhys();
259 PAGEDIR(VAddr >> 21) = tmp | 3;
260 INVLPG( &PAGETABLE( (VAddr>>21)<<9 ) );
261 memset( &PAGETABLE( (VAddr>>21)<<9 ), 0, 4096 );
264 // Check if this virtual address is already mapped
265 if( PAGETABLE(VAddr >> PTAB_SHIFT) & 1 )
// Install the final PTE (Present|Writable)
268 PAGETABLE(VAddr >> PTAB_SHIFT) = PAddr | 3;
277 * \brief Removed a mapped page
// Clears the PTE for VAddr; silently returns if any upper paging level
// is absent.  Does not free the physical frame or flush the TLB here
// (any such handling falls on lines not visible in this view).
279 void MM_Unmap(tVAddr VAddr)
282 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) ) return ;
284 if( !(PAGEDIRPTR(VAddr >> 30) & 1) ) return ;
286 if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
288 PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
293 * \brief Allocate a block of memory at the specified virtual address
// Two-phase: first map frame 0 so MM_Map builds any missing paging
// levels (the "hack" the author mentions), then allocate the real
// frame and map it over the top.
295 tPAddr MM_Allocate(tVAddr VAddr)
299 ENTER("xVAddr", VAddr);
301 // NOTE: This is hack, but I like my dumps to be neat
303 if( !MM_Map(VAddr, 0) ) // Make sure things are allocated
305 Warning("MM_Allocate: Unable to map, tables did not initialise");
// Grab the backing frame (failure handling is on lines not visible)
312 ret = MM_AllocPhys();
313 LOG("ret = %x", ret);
// Remap with the real frame; should always succeed given phase one
319 if( !MM_Map(VAddr, ret) )
321 Warning("MM_Allocate: Unable to map. Strange, we should have errored earlier");
// Frees the page at VAddr — looks up the backing physical frame first;
// the unmap/deref steps fall on lines not visible in this view.
331 void MM_Deallocate(tVAddr VAddr)
335 phys = MM_GetPhysAddr(VAddr);
344 * \brief Get the physical address of a virtual location
// Walks all four paging levels; the failure returns (presumably 0) are
// on lines not visible here.  On success returns the frame address
// combined with the in-page offset.
346 tPAddr MM_GetPhysAddr(tVAddr Addr)
348 if( !(PAGEMAPLVL4(Addr >> 39) & 1) )
350 if( !(PAGEDIRPTR(Addr >> 30) & 1) )
352 if( !(PAGEDIR(Addr >> 21) & 1) )
354 if( !(PAGETABLE(Addr >> PTAB_SHIFT) & 1) )
357 return (PAGETABLE(Addr >> PTAB_SHIFT) & ~0xFFF) | (Addr & 0xFFF);
361 * \brief Sets the flags on a page
// Translates the portable MM_PFLAG_* bits into hardware PTE bits on the
// page's entry.  Mask selects which flags to touch, Flags gives their
// new values.  Returns silently if the page is not mapped.  The actual
// set/clear bodies of each branch fall on lines not visible here.
363 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
// Bail out if any paging level is absent
368 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
370 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
372 if( !(PAGEDIR(VAddr >> 21) & 1) )
374 if( !(PAGETABLE(VAddr >> 12) & 1) )
// Operate on the PTE in place through the fractal window
378 ent = &PAGETABLE(VAddr >> 12);
// Read-only <-> writable
381 if( Mask & MM_PFLAG_RO )
383 if( Flags & MM_PFLAG_RO ) {
// Kernel-only <-> user-accessible
392 if( Mask & MM_PFLAG_KERNEL )
394 if( Flags & MM_PFLAG_KERNEL ) {
// Copy-on-write (software bit)
403 if( Mask & MM_PFLAG_COW )
405 if( Flags & MM_PFLAG_COW ) {
// Executable <-> NX
416 if( Mask & MM_PFLAG_EXEC )
418 if( Flags & MM_PFLAG_EXEC ) {
428 * \brief Get the flags applied to a page
// Inverse of MM_SetFlags: reads the PTE and reports portable MM_PFLAG_*
// bits.  Note RO/KERNEL/EXEC are inverted senses of the hardware bits
// (absence of WRITE => RO, absence of USER => KERNEL, absence of NX =>
// EXEC) while COW maps directly.
430 Uint MM_GetFlags(tVAddr VAddr)
// Unmapped pages fall out early (return value on lines not visible)
436 if( !(PAGEMAPLVL4(VAddr >> 39) & 1) )
438 if( !(PAGEDIRPTR(VAddr >> 30) & 1) )
440 if( !(PAGEDIR(VAddr >> 21) & 1) )
442 if( !(PAGETABLE(VAddr >> 12) & 1) )
446 ent = &PAGETABLE(VAddr >> 12);
449 if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
451 if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
453 if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
455 if( !(*ent & PF_NX) ) ret |= MM_PFLAG_EXEC;
460 // --- Hardware Mappings ---
462 * \brief Map a range of hardware pages
// Unimplemented stub: panics if reached.
464 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
466 Log_KernelPanic("MM", "TODO: Implement MM_MapHWPages");
471 * \brief Free a range of hardware pages
// Unimplemented stub: panics if reached.
473 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
475 Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
478 // --- Tempory Mappings ---
// Unimplemented stub: panics if reached.
479 tVAddr MM_MapTemp(tPAddr PAddr)
481 Log_KernelPanic("MM", "TODO: Implement MM_MapTemp");
// Unimplemented stub: panics if reached.
485 void MM_FreeTemp(tVAddr VAddr)
487 Log_KernelPanic("MM", "TODO: Implement MM_FreeTemp");
492 // --- Address Space Clone --
// Intended to fork the current address space (copy PML4, fix up the
// fractal self-reference, mark user pages COW) — only the first frame
// allocation exists; the rest panics as unimplemented.
493 tPAddr MM_Clone(void)
497 // #1 Create a copy of the PML4
498 ret = MM_AllocPhys();
501 Log_KernelPanic("MM", "TODO: Implement MM_Clone");
503 // #2 Alter the fractal pointer
504 // #3 Set Copy-On-Write to all user pages
// Tears down the entire user half of the address space (< 2^47):
// dereferences every mapped page, then dereferences and clears each
// table/directory/PDPT/PML4 entry when the walk crosses out of its
// covered region.  Several closing braces / else branches are missing
// from this view, so the exact nesting cannot be confirmed.
509 void MM_ClearUser(void)
512 // #1 Traverse the structure < 2^47, Deref'ing all pages
513 // #2 Free tables/dirs/pdps once they have been cleared
515 for( addr = 0; addr < 0x800000000000; )
517 if( PAGEMAPLVL4(addr >> PML4_SHIFT) & 1 )
519 if( PAGEDIRPTR(addr >> PDP_SHIFT) & 1 )
521 if( PAGEDIR(addr >> PDIR_SHIFT) & 1 )
// Drop each mapped user page
524 if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
525 MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
526 PAGETABLE(addr >> PTAB_SHIFT) = 0;
528 addr += 1 << PTAB_SHIFT;
529 // Dereference the PDIR Entry
// NOTE(review): suspected copy-paste bug — this derefs
// PAGEMAPLVL4(addr >> PDIR_SHIFT) but the next line clears
// PAGEDIR(addr >> PDIR_SHIFT); the MM_DerefPhys argument should
// presumably be PAGEDIR(addr >> PDIR_SHIFT) & PADDR_MASK.
530 if( (addr + (1 << PTAB_SHIFT)) >> PDIR_SHIFT != (addr >> PDIR_SHIFT) ) {
531 MM_DerefPhys( PAGEMAPLVL4(addr >> PDIR_SHIFT) & PADDR_MASK );
532 PAGEDIR(addr >> PDIR_SHIFT) = 0;
536 addr += 1 << PDIR_SHIFT;
539 // Dereference the PDP Entry
// NOTE(review): same suspected bug — derefs PAGEMAPLVL4 where the line
// below clears PAGEDIRPTR; argument should presumably be
// PAGEDIRPTR(addr >> PDP_SHIFT) & PADDR_MASK.
540 if( (addr + (1 << PDIR_SHIFT)) >> PDP_SHIFT != (addr >> PDP_SHIFT) ) {
541 MM_DerefPhys( PAGEMAPLVL4(addr >> PDP_SHIFT) & PADDR_MASK );
542 PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
546 addr += 1 << PDP_SHIFT;
549 // Dereference the PML4 Entry
550 if( (addr + (1 << PDP_SHIFT)) >> PML4_SHIFT != (addr >> PML4_SHIFT) ) {
551 MM_DerefPhys( PAGEMAPLVL4(addr >> PML4_SHIFT) & PADDR_MASK );
552 PAGEMAPLVL4(addr >> PML4_SHIFT) = 0;
// Cast to tVAddr here avoids 32-bit shift overflow for PML4_SHIFT (39)
556 addr += (tVAddr)1 << PML4_SHIFT;
// Unimplemented stub: panics if reached.
562 tVAddr MM_NewWorkerStack(void)
564 Log_KernelPanic("MM", "TODO: Implement MM_NewWorkerStack");
569 * \brief Allocate a new kernel stack
// Linearly scans the kernel-stack region for a free slot, then maps
// KERNEL_STACK_SIZE worth of pages and returns the stack TOP (base +
// size, since x86 stacks grow downward).  The skip branch of the
// MM_GetPhysAddr test (presumably a `continue`) is on lines missing
// from this view.
571 tVAddr MM_NewKStack(void)
573 tVAddr base = MM_KSTACK_BASE;
575 for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
// Slot already backed by a physical page => in use
577 if(MM_GetPhysAddr(base) != 0)
580 Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
581 for( i = 0; i < KERNEL_STACK_SIZE; i += 0x1000)
584 return base + KERNEL_STACK_SIZE;
586 Log_Warning("MM", "MM_NewKStack - No address space left\n");