4 * ARM7 Virtual Memory Manager
5 * - arch/arm7/mm_virt.c
// --- Access Permission (AP) values for page descriptors ---
// Encoded as AP[1:0] in the low two bits plus APX in bit 2; the set/get
// routines below split them that way (see MM_int_SetPageInfo / GetPageInfo).
12 #define AP_KRW_ONLY 1 // Kernel page
13 #define AP_KRO_ONLY 5 // Kernel RO page
14 #define AP_RW_BOTH 3 // Standard RW
15 #define AP_RO_BOTH 6 // COW Page
16 #define AP_RO_USER 2 // User RO Page
// Mask for the physical base address of a coarse-table (level 1) descriptor
17 #define PADDR_MASK_LVL1 0xFFFFFC00
// First-level translation table for kernel space (defined in arch setup code)
20 extern Uint32 kernel_table0[];
// Self-mapping ("fractal") accessors: locate the level-1 entry that maps a
// given virtual address inside the table's own mapping of itself.
// NOTE(review): the active version shifts by 22 (4MiB granularity); the
// commented-out >>20 variant is the older per-MiB form.
34 //#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)])
35 #define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)])
36 #define USRFRACTAL(addr) (*((Uint32*)(0x7FDFF000) + ((addr)>>22)))
// TLB maintenance (CP15 c8): invalidate entire unified TLB / by MVA
37 #define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0))
38 #define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1" : : "r" (addr))
// === Internal prototypes ===
// NOTE(review): MM_NewKStack is declared with parameter name `bGlobal` here
// but defined with `bShared` below — harmless, but worth unifying.
41 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1);
42 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain);
43 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
44 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
45 tVAddr MM_NewUserStack(void);
46 tPAddr MM_AllocateZero(tVAddr VAddr);
47 tPAddr MM_AllocateRootTable(void);
48 void MM_int_CloneTable(Uint32 *DestEnt, int Table);
49 tPAddr MM_Clone(void);
50 tVAddr MM_NewKStack(int bGlobal);
51 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info);
52 //void MM_DumpTables(tVAddr Start, tVAddr End);
// One-time initialisation of the virtual memory manager
// (presumably called during early boot — body elided in this view).
58 int MM_InitialiseVirtual(void)
// Select the level-0/level-1 table pair covering VAddr.
// Addresses with bit 31 set (>= 0x80000000) are kernel space and use the
// shared kernel tables; everything below uses the per-process user tables.
63 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1)
65 if(VAddr & 0x80000000) {
66 *Table0 = (void*)&kernel_table0; // Level 0
67 *Table1 = (void*)MM_TABLE1KERN; // Level 1
70 *Table0 = (void*)MM_TABLE0USER;
71 *Table1 = (void*)MM_TABLE1USER;
// Allocate one physical page holding four 1KiB coarse (second-level) tables,
// covering a 4MiB-aligned block of virtual space, and install the four
// level-0 descriptors plus the fractal self-mapping entry.
75 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain)
77 Uint32 *table0, *table1;
81 ENTER("xVAddr iDomain", VAddr, Domain);
83 MM_int_GetTables(VAddr, &table0, &table1);
85 VAddr &= ~(0x400000-1); // 4MiB per "block", 1 Page
87 desc = &table0[ VAddr>>20];
88 LOG("desc = %p", desc);
90 // table0: 4 bytes = 1 MiB
92 LOG("desc[0] = %x", desc[0]);
93 LOG("desc[1] = %x", desc[1]);
94 LOG("desc[2] = %x", desc[2]);
95 LOG("desc[3] = %x", desc[3]);
// Fail if any of the four 1MiB slots is already mapped (low 2 bits != 0)
97 if( (desc[0] & 3) != 0 || (desc[1] & 3) != 0
98 || (desc[2] & 3) != 0 || (desc[3] & 3) != 0 )
105 paddr = MM_AllocPhys();
// Descriptor: coarse table base | domain in bits[8:5] | type 1 (coarse)
113 *desc = paddr | (Domain << 5) | 1;
// The other three slots point at the 1KiB sub-tables within the same page
// (adding 0x400 preserves the domain/type bits set in desc[0])
114 desc[1] = desc[0] + 0x400;
115 desc[2] = desc[0] + 0x800;
116 desc[3] = desc[0] + 0xC00;
// Record the new table in the fractal self-mapping (user or kernel variant)
118 if( VAddr < 0x80000000 ) {
119 // Log("USRFRACTAL(%p) = %p", VAddr, &USRFRACTAL(VAddr));
120 USRFRACTAL(VAddr) = paddr | 3;
123 // Log("FRACTAL(%p) = %p", VAddr, &FRACTAL(table1, VAddr));
124 FRACTAL(table1, VAddr) = paddr | 3;
// Write a page mapping described by *pi at VAddr.
// Dispatches on pi->Size (log2 of the mapping size): 12 = 4KiB small page,
// 16 = 64KiB large page, 20 = 1MiB section, 24 = 16MiB supersection.
// Returns non-zero on error (e.g. clobbering an existing large page).
134 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
136 Uint32 *table0, *table1;
139 ENTER("pVAddr ppi", VAddr, pi);
141 MM_int_GetTables(VAddr, &table0, &table1);
143 desc = &table0[ VAddr >> 20 ];
144 LOG("desc = %p", desc);
148 case 12: // Small Page
149 case 16: // Large Page
// No second-level table yet? Allocate the coarse-table block first
151 if( (*desc & 3) == 0 ) {
152 MM_int_AllocateCoarse( VAddr, pi->Domain );
154 desc = &table1[ VAddr >> 12 ];
155 LOG("desc (2) = %p", desc);
159 // - Error if overwriting a large page
160 if( (*desc & 3) == 1 ) LEAVE_RET('i', 1);
// PhysAddr == 0 is the "unmap" request (clears the entry — elided here)
161 if( pi->PhysAddr == 0 ) {
// Small-page descriptor: base | type 2, then permission/attribute bits
167 *desc = (pi->PhysAddr & 0xFFFFF000) | 2;
168 if(!pi->bExecutable) *desc |= 1; // XN
169 if(!pi->bGlobal) *desc |= 1 << 11; // NG
170 if( pi->bShared) *desc |= 1 << 10; // S
171 *desc |= (pi->AP & 3) << 4; // AP
172 *desc |= ((pi->AP >> 2) & 1) << 9; // APX
// Invalidate the (possibly) stale TLB entry for this page
173 TLBIMVA(VAddr & 0xFFFFF000);
181 Log_Warning("MMVirt", "TODO: Implement large pages in MM_int_SetPageInfo");
184 case 20: // Section or unmapped
185 Warning("TODO: Implement sections");
187 case 24: // Supersection
188 // Error if not aligned
189 if( VAddr & 0xFFFFFF ) {
// Only proceed if slot is empty, or already a supersection (type 2 + bit 18)
193 if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) )
195 if( pi->PhysAddr == 0 ) {
197 // TODO: Apply to all entries
// Supersection descriptor: 16MiB base | type 2 | supersection flag (bit 18)
202 *desc = pi->PhysAddr & 0xFF000000;
203 // *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20;
204 // *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5;
205 *desc |= 2 | (1 << 18);
206 // TODO: Apply to all entries
// Read the mapping information for VAddr into *pi (inverse of SetPageInfo).
// Walks the level-0 descriptor, recursing into the coarse table for
// small/large pages; decodes base address, AP/APX, XN, S, nG and domain.
// Returns non-zero if the address is not mapped.
219 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
221 Uint32 *table0, *table1;
224 // LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi);
226 MM_int_GetTables(VAddr, &table0, &table1);
228 desc = table0[ VAddr >> 20 ];
230 // if( VAddr > 0x90000000)
231 // LOG("table0 desc(%p) = %x", &table0[ VAddr >> 20 ], desc);
247 // 1: Coarse page table
249 // Domain from top level table
250 pi->Domain = (desc >> 5) & 7;
252 desc = table1[ VAddr >> 12 ];
253 // LOG("table1 desc(%p) = %x", &table1[ VAddr >> 12 ], desc);
260 // 1: Large Page (64KiB)
263 pi->PhysAddr = desc & 0xFFFF0000;
264 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
265 pi->bExecutable = !(desc & 0x8000);
266 pi->bShared = (desc >> 10) & 1;
272 pi->PhysAddr = desc & 0xFFFFF000;
273 pi->bExecutable = !(desc & 1);
274 pi->bGlobal = !((desc >> 11) & 1); // FIX: was !(desc >> 11) — that tested every bit >= 11 (incl. the physical address), so bGlobal was almost always false; only nG (bit 11) is meant
275 pi->bShared = (desc >> 10) & 1;
276 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
281 // 2: Section (or Supersection)
283 if( desc & (1 << 18) ) {
285 pi->PhysAddr = desc & 0xFF000000;
286 pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32;
287 pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36;
// NOTE(review): bits[8:5] of a supersection hold 4 extended-address bits,
// so the 0x7 mask above drops one — left as-is to match the (commented)
// encoder in MM_int_SetPageInfo; confirm against the ARM ARM before changing.
289 pi->Domain = 0; // Supersections default to zero
290 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
295 pi->PhysAddr = desc & 0xFFF00000; // FIX: was 0xFFF80000 — bit 19 (NS) is not part of a 1MiB section's base address and corrupted PhysAddr when set
297 pi->Domain = (desc >> 5) & 7;
298 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
301 // 3: Reserved (invalid)
// Translate a virtual address to its physical address (0/elided error path
// if unmapped). Combines the mapping base with the offset within the
// mapping, whose width is pi.Size (log2 of the mapping size).
312 tPAddr MM_GetPhysAddr(tVAddr VAddr)
315 if( MM_int_GetPageInfo(VAddr, &pi) )
317 return pi.PhysAddr | (VAddr & ((1 << pi.Size)-1));
// Return the MM_PFLAG_* flag set for the page at VAddr, derived from the
// descriptor's AP/XN fields (intermediate branches elided in this view).
320 Uint MM_GetFlags(tVAddr VAddr)
325 if( MM_int_GetPageInfo(VAddr, &pi) )
335 ret |= MM_PFLAG_KERNEL;
338 ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO;
350 if( pi.bExecutable ) ret |= MM_PFLAG_EXEC;
// Update the MM_PFLAG_* flags of the page at VAddr: bits in Mask are
// replaced by the corresponding bits of Flags, then the descriptor's
// AP/XN fields are rewritten via MM_int_SetPageInfo. No-op if unmapped
// or if the masked flags already match.
354 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
359 if( MM_int_GetPageInfo(VAddr, &pi) )
362 curFlags = MM_GetFlags(VAddr); // FIX: was MM_GetPhysAddr(VAddr) — a physical address is not a flag set; the value is masked against MM_PFLAG_* below
363 if( (curFlags & Mask) == Flags )
368 if( curFlags & MM_PFLAG_COW )
// Map the kernel/RO flag combination onto the hardware AP encoding
372 switch(curFlags & (MM_PFLAG_KERNEL|MM_PFLAG_RO) )
375 pi.AP = AP_RW_BOTH; break;
376 case MM_PFLAG_KERNEL:
377 pi.AP = AP_KRW_ONLY; break;
379 pi.AP = AP_RO_USER; break;
380 case MM_PFLAG_KERNEL|MM_PFLAG_RO:
381 pi.AP = AP_KRO_ONLY; break;
385 pi.bExecutable = !!(curFlags & MM_PFLAG_EXEC);
387 MM_int_SetPageInfo(VAddr, &pi);
// Map the physical page PAddr at VAddr as a 4KiB kernel R/W page
// (user-space AP selection for addresses below USER_STACK_TOP is elided).
// On failure the reference taken on the page is dropped again.
390 int MM_Map(tVAddr VAddr, tPAddr PAddr)
392 tMM_PageInfo pi = {0};
393 // Log("MM_Map %P=>%p", PAddr, VAddr);
397 if(VAddr < USER_STACK_TOP)
400 pi.AP = AP_KRW_ONLY; // Kernel Read/Write
402 if( MM_int_SetPageInfo(VAddr, &pi) ) {
403 MM_DerefPhys(pi.PhysAddr);
// Allocate a fresh physical page and map it at VAddr.
// Returns the physical address, or 0 if no physical memory is available.
// On mapping failure the just-allocated page is released.
409 tPAddr MM_Allocate(tVAddr VAddr)
411 tMM_PageInfo pi = {0};
413 ENTER("pVAddr", VAddr);
415 pi.PhysAddr = MM_AllocPhys();
416 if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0);
418 if(VAddr < USER_STACK_TOP)
423 if( MM_int_SetPageInfo(VAddr, &pi) ) {
424 MM_DerefPhys(pi.PhysAddr);
428 LEAVE('x', pi.PhysAddr);
// Map the shared zero page at VAddr as copy-on-write.
// The first call allocates and zeroes the global zero page (giMM_ZeroPage);
// later calls just map it with an extra reference and the COW flag so a
// write fault will give the process a private copy.
432 tPAddr MM_AllocateZero(tVAddr VAddr)
434 if( !giMM_ZeroPage ) {
435 giMM_ZeroPage = MM_Allocate(VAddr);
// Extra reference keeps the zero page alive permanently
436 MM_RefPhys(giMM_ZeroPage);
437 memset((void*)VAddr, 0, PAGE_SIZE);
440 MM_RefPhys(giMM_ZeroPage);
441 MM_Map(VAddr, giMM_ZeroPage);
443 MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
444 return giMM_ZeroPage;
// Unmap the page at VAddr and drop its physical reference.
// Silently returns if the address is unmapped or has no backing page.
447 void MM_Deallocate(tVAddr VAddr)
451 if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
453 if( pi.PhysAddr == 0 ) return;
454 MM_DerefPhys(pi.PhysAddr);
// Rewrite the entry (with the cleared info — intermediate lines elided)
459 MM_int_SetPageInfo(VAddr, &pi);
// Allocate the two physically-contiguous pages for a new user first-level
// table, which must be aligned to its own size. First try 2 pages; if the
// alignment is wrong, free them and allocate 3 pages so an aligned 2-page
// window must exist within the range, then free the spare page.
// NOTE(review): alignment checks themselves are elided in this view.
462 tPAddr MM_AllocateRootTable(void)
466 ret = MM_AllocPhysRange(2, -1);
469 MM_DerefPhys(ret+0x1000);
470 ret = MM_AllocPhysRange(3, -1);
474 // Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
477 MM_DerefPhys(ret + 0x2000);
478 // Log("MM_AllocateRootTable: Second try aligned, %P", ret);
482 // Log("MM_AllocateRootTable: Got it in one, %P", ret);
// Clone one 4MiB block of second-level tables (block index `Table`) into a
// freshly allocated page, marking writable user pages copy-on-write in BOTH
// the parent and the copy, and write the four coarse-table descriptors
// into DestEnt[0..3]. Used by MM_Clone.
486 void MM_int_CloneTable(Uint32 *DestEnt, int Table)
490 Uint32 *cur = (void*)MM_TABLE0USER;
491 // Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
494 table = MM_AllocPhys();
497 tmp_map = (void*)MM_MapTemp(table);
499 for( i = 0; i < 1024; i ++ )
503 case 0: tmp_map[i] = 0; break;
506 Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable");
// If entry i is full RW (APX=0, AP=3), demote it to full RO => COW
513 if( (cur[Table*256+i] & 0x230) == 0x030 ) // FIX: was cur[Table*256] — tested only entry 0's permission bits while the next line modifies entry i
514 cur[Table*256+i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
515 tmp_map[i] = cur[Table*256+i];
// Four coarse-table descriptors (1KiB apart, type 1) covering the 4MiB block
520 DestEnt[0] = table + 0*0x400 + 1;
521 DestEnt[1] = table + 1*0x400 + 1;
522 DestEnt[2] = table + 2*0x400 + 1;
523 DestEnt[3] = table + 3*0x400 + 1;
// Clone the current user address space (fork). Builds a new root table,
// copies/COWs all user mappings, installs the fractal self-mapping in the
// last 4MiB of user space, and duplicates the current kernel stack.
// Returns the physical address of the new root table.
526 tPAddr MM_Clone(void)
529 Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
533 ret = MM_AllocateRootTable();
535 cur = (void*)MM_TABLE0USER;
// Temporarily map both pages of the new first-level table
536 new_lvl1_1 = (void*)MM_MapTemp(ret);
537 new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
538 tmp_map = new_lvl1_1;
// Walk all user level-0 entries except the last 4 (reserved for the fractal)
539 for( i = 0; i < 0x800-4; i ++ )
// Past entry 0x400 switch to the second mapped page (index stays absolute)
543 tmp_map = &new_lvl1_2[-0x400];
546 case 0: tmp_map[i] = 0; break;
548 MM_int_CloneTable(&tmp_map[i], i);
549 i += 3; // Tables are allocated in blocks of 4
553 Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
559 // Allocate Fractal table
562 tPAddr tmp = MM_AllocPhys();
563 Uint32 *table = (void*)MM_MapTemp(tmp);
// Current stack pointer, to locate this thread's kernel stack below
565 register Uint32 __SP asm("sp");
567 // Map table to last 4MiB of user space
568 new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
569 new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
570 new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
571 new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
// Fill the fractal table: one small-page entry per coarse-table page
573 tmp_map = new_lvl1_1;
574 for( j = 0; j < 512; j ++ )
577 tmp_map = &new_lvl1_2[-0x400];
578 if( (tmp_map[j*4] & 3) == 1 )
580 table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
581 table[j] |= 0x813; // nG, Kernel Only, Small page, XN
// Map the root table itself (2 pages), then clear the rest
587 table[j++] = (ret + 0x0000) | 0x813;
588 table[j++] = (ret + 0x1000) | 0x813;
590 for( ; j < 1024; j ++ )
593 // Get kernel stack bottom
594 sp = __SP & ~(MM_KSTACK_SIZE-1);
595 j = (sp / 0x1000) % 1024;
596 num = MM_KSTACK_SIZE/0x1000;
// Copy the active kernel stack page-by-page into fresh physical pages
599 for(; num--; j ++, sp += 0x1000)
604 page = MM_AllocPhys();
605 table[j] = page | 0x813;
607 tmp_page = (void*)MM_MapTemp(page);
608 memcpy(tmp_page, (void*)sp, 0x1000);
609 MM_FreeTemp( (tVAddr) tmp_page );
612 MM_FreeTemp( (tVAddr)table );
615 MM_FreeTemp( (tVAddr)new_lvl1_1 );
616 MM_FreeTemp( (tVAddr)new_lvl1_2 );
// Discard all user-space mappings of the current process (exec support).
621 tPAddr MM_ClearUser(void)
623 // TODO: Implement ClearUser
// Map PAddr into a free slot of the temporary-mapping window and return the
// virtual address, or fall through to a warning when all slots are taken.
// The extra MM_RefPhys balances the MM_Deallocate done by MM_FreeTemp.
// NOTE(review): the `< MM_TMPMAP_END - PAGE_SIZE` bound skips the final
// slot — confirm whether that is intentional.
627 tVAddr MM_MapTemp(tPAddr PAddr)
632 for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
634 if( MM_int_GetPageInfo(ret, &pi) == 0 )
637 // Log("MapTemp %P at %p", PAddr, ret);
638 MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp
643 Log_Warning("MMVirt", "MM_MapTemp: All slots taken");
// Release a temporary mapping obtained from MM_MapTemp.
// Validates that the address lies inside the temp-map window, then unmaps
// it (dropping the reference MM_MapTemp took).
647 void MM_FreeTemp(tVAddr VAddr)
649 // TODO: Implement FreeTemp
650 if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) {
651 Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr);
655 MM_Deallocate(VAddr);
// Map NPages physically-contiguous pages starting at PAddr into the
// hardware-mapping window. Scans for a run of NPages free virtual pages,
// maps them consecutively, and returns the base virtual address
// (warning/0 path when the window is exhausted).
658 tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages)
664 ENTER("xPAddr iNPages", PAddr, NPages);
666 // Scan for a location
667 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE )
669 // LOG("checking %p", ret);
670 // Check if there is `NPages` free pages
671 for( i = 0; i < NPages; i ++ )
673 if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 )
676 // Nope, jump to after the used page found and try again
677 // LOG("i = %i, ==? %i", i, NPages);
679 ret += i * PAGE_SIZE;
// Found a free run — map each page at its matching physical offset
684 for( i = 0; i < NPages; i ++ )
685 MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE); // FIX: was PAddr+i*PAddr — offset was scaled by the physical address itself, mapping garbage for every page after the first
690 Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages);
// Allocate `Pages` contiguous physical pages below the 2^MaxBits address
// limit (for DMA-capable devices) and map them; physical base is returned
// via *PAddr, virtual base as the return value.
695 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr)
700 phys = MM_AllocPhysRange(Pages, MaxBits);
703 ret = MM_MapHWPages(phys, Pages);
// Release a hardware mapping created by MM_MapHWPages (unimplemented).
709 void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)
711 Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages");
// Allocate a new kernel stack and return a pointer near its top.
// bShared selects the global-stack region over the per-process one.
// The first page of each slot is deliberately left unallocated (ofs starts
// at PAGE_SIZE) — acts as a guard page. On partial failure all pages
// allocated so far are rolled back.
// NOTE(review): declared as `bGlobal` in the prototype above — unify names.
714 tVAddr MM_NewKStack(int bShared)
716 tVAddr min_addr, max_addr;
720 min_addr = MM_GLOBALSTACKS;
721 max_addr = MM_GLOBALSTACKS_END;
724 min_addr = MM_KSTACK_BASE;
725 max_addr = MM_KSTACK_END;
728 // Locate a free slot
729 for( addr = min_addr; addr < max_addr; addr += MM_KSTACK_SIZE )
// A slot is free if its topmost page is unmapped
732 if( MM_int_GetPageInfo(addr+MM_KSTACK_SIZE-PAGE_SIZE, &pi) ) break;
735 // Check for an error
736 if(addr >= max_addr) {
741 for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE )
743 if( MM_Allocate(addr + ofs) == 0 )
// Roll back everything allocated so far
748 MM_Deallocate(addr + ofs);
750 Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate");
// Create the initial user stack just below USER_STACK_TOP, with a guard
// page at the bottom (ofs starts at PAGE_SIZE). Only the committed top
// region (USER_STACK_COMM) gets real pages; the rest maps the shared zero
// page (COW). Pages are made user-accessible, and the stack top is returned.
// NOTE(review): the MM_DumpTables call below looks like leftover debugging.
757 tVAddr MM_NewUserStack(void)
761 addr = USER_STACK_TOP - USER_STACK_SIZE;
762 if( MM_GetPhysAddr(addr + PAGE_SIZE) ) {
763 Log_Error("MMVirt", "Unable to create initial user stack, addr %p taken",
770 for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE )
773 if(ofs >= USER_STACK_SIZE - USER_STACK_COMM)
774 rv = MM_Allocate(addr + ofs);
776 rv = MM_AllocateZero(addr + ofs);
// Roll back on failure
782 MM_Deallocate(addr + ofs);
784 Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate");
// Clear the kernel-only flag so user mode can use the page
787 MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL);
789 Log("Return %p", addr + ofs);
790 MM_DumpTables(0, 0x80000000);
// Print one coalesced mapping range for MM_DumpTables.
// The zero page gets a symbolic name (%8s branch); otherwise the physical
// base is printed. Info->PhysAddr has been advanced past the range by the
// caller, hence the `PhysAddr - Len` to recover the range's base.
794 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info)
796 if( giMM_ZeroPage && Info->PhysAddr == giMM_ZeroPage )
798 Log("%p => %8s - 0x%7x %i %x",
800 Info->Domain, Info->AP
805 Log("%p => %8x - 0x%7x %i %x",
806 Start, Info->PhysAddr-Len, Len,
807 Info->Domain, Info->AP
// Debug dump of the page tables over [Start, End): walks the address space
// mapping-by-mapping, coalescing physically-contiguous runs with identical
// Size/Domain/AP into single output lines via MM_int_DumpTableEnt.
// The `i == 0 ||` in the loop condition forces at least one iteration even
// when Start is 0; `addr &&` stops the walk if the address wraps to 0.
812 void MM_DumpTables(tVAddr Start, tVAddr End)
814 tVAddr range_start = 0, addr;
815 tMM_PageInfo pi, pi_old;
816 int i = 0, inRange=0;
820 Log("Page Table Dump:");
822 for( addr = Start; i == 0 || (addr && addr < End); i = 1 )
824 // Log("addr = %p", addr);
825 int rv = MM_int_GetPageInfo(addr, &pi);
// Start a new output range whenever any attribute or contiguity breaks
827 || pi.Size != pi_old.Size
828 || pi.Domain != pi_old.Domain
829 || pi.AP != pi_old.AP
830 || pi_old.PhysAddr != pi.PhysAddr )
833 MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old);
// Align down to the current mapping's natural boundary
835 addr &= ~((1 << pi.Size)-1);
840 // Handle the zero page
// The shared zero page repeats, so don't advance the expected phys addr
841 if( !giMM_ZeroPage || pi_old.Size != 12 || pi_old.PhysAddr != giMM_ZeroPage )
842 pi_old.PhysAddr += 1 << pi_old.Size;
843 addr += 1 << pi_old.Size;
// Flush the final pending range
847 MM_int_DumpTableEnt(range_start, addr - range_start, &pi);