4 * ARM7 Virtual Memory Manager
5 * - arch/arm7/mm_virt.c
// Access-permission (AP) codes stored in tMM_PageInfo.AP.
// Encoding is AP[1:0] | (APX << 2), matching the short-descriptor page table format.
15 #define AP_KRW_ONLY 1 // Kernel page
16 #define AP_KRO_ONLY 5 // Kernel RO page
17 #define AP_RW_BOTH 3 // Standard RW
18 #define AP_RO_BOTH 7 // COW Page
19 #define AP_RO_USER 2 // User RO Page
// Mask to extract the physical address from a level-1 coarse-table descriptor
20 #define PADDR_MASK_LVL1 0xFFFFFC00
// Human-readable names for each 3-bit AP code (indexed by the AP value above)
22 const char * const caAPValueNames[] = {
23 "AP_NOACCESS", "AP_KRW_ONLY",
24 "AP_RO_USER", "AP_RW_BOTH",
25 "AP_???_4", "AP_KRO_ONLY",
26 "AP_???_6", "AP_RO_BOTH"
// Kernel level-0 translation table (defined outside this file)
30 extern Uint32 kernel_table0[];
44 //#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)])
// FRACTAL: locate the self-mapping (fractal) level-1 entry covering `addr`.
// NOTE(review): uses >>22 (4MiB granule); the >>20 variant above is kept commented-out — confirm which is intended.
45 #define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)])
// USRFRACTAL: same idea for the user-space fractal mapping window at 0x7FDFF000
46 #define USRFRACTAL(addr) (*((Uint32*)(0x7FDFF000) + ((addr)>>22)))
// TLBIALL: invalidate the entire unified TLB (CP15 c8,c7,0)
47 #define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0))
// TLBIMVA: invalidate a single TLB entry by page-aligned MVA (low bits carry ASID 1),
// followed by DSB/ISB barriers so the invalidation takes effect before returning
48 #define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1;dsb;isb" : : "r" (((addr)&~0xFFF)|1):"memory")
// DCCMVAC: clean the data-cache line(s) for the page containing addr (CP15 c7,c10,1)
49 #define DCCMVAC(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((addr)&~0xFFF))
// === Forward declarations (internal helpers and exported entry points) ===
52 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1);
53 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain);
54 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
55 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
56 tVAddr MM_NewUserStack(void);
57 tPAddr MM_AllocateZero(tVAddr VAddr);
58 tPAddr MM_AllocateRootTable(void);
59 void MM_int_CloneTable(Uint32 *DestEnt, int Table);
60 tPAddr MM_Clone(void);
61 tVAddr MM_NewKStack(int bGlobal);
62 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info);
63 //void MM_DumpTables(tVAddr Start, tVAddr End);
64 void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch, Uint32 UserLR);
// Initialise the virtual memory manager (body not visible in this chunk)
70 int MM_InitialiseVirtual(void)
// Return the level-0 and level-1 table mappings that cover VAddr:
// kernel tables for the upper half (>= 0x80000000), per-process user tables otherwise.
75 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1)
77 if(VAddr & 0x80000000) {
78 *Table0 = (void*)&kernel_table0; // Level 0
79 *Table1 = (void*)MM_TABLE1KERN; // Level 1
// (else branch: user-space tables, accessed via their fractal mappings)
82 *Table0 = (void*)MM_TABLE0USER;
83 *Table1 = (void*)MM_TABLE1USER;
// Allocate one physical page holding four coarse (level-1) page tables and install
// it into the four consecutive level-0 slots covering the 4MiB block around VAddr.
// Returns via LEAVE_RET (gapped here); fails if any of the four slots is already mapped.
87 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain)
89 Uint32 *table0, *table1;
93 ENTER("xVAddr iDomain", VAddr, Domain);
95 MM_int_GetTables(VAddr, &table0, &table1);
// Align down to the 4MiB block: one physical page holds 4 x 1KiB coarse tables
97 VAddr &= ~(0x400000-1); // 4MiB per "block", 1 Page
99 desc = &table0[ VAddr>>20];
100 LOG("desc = %p", desc);
102 // table0: 4 bytes = 1 MiB
104 LOG("desc[0] = %x", desc[0]);
105 LOG("desc[1] = %x", desc[1]);
106 LOG("desc[2] = %x", desc[2]);
107 LOG("desc[3] = %x", desc[3]);
// Refuse to overwrite any already-populated level-0 entry in this block
109 if( (desc[0] & 3) != 0 || (desc[1] & 3) != 0
110 || (desc[2] & 3) != 0 || (desc[3] & 3) != 0 )
117 paddr = MM_AllocPhys();
// Level-0 coarse-table descriptor: base | domain | type 1; the page is split
// into four 1KiB coarse tables at +0x000, +0x400, +0x800, +0xC00
125 *desc = paddr | (Domain << 5) | 1;
126 desc[1] = desc[0] + 0x400;
127 desc[2] = desc[0] + 0x800;
128 desc[3] = desc[0] + 0xC00;
// Install the new table page into the fractal mapping so it can be written below
130 if( VAddr < 0x80000000 ) {
131 USRFRACTAL(VAddr) = paddr | 0x13;
134 FRACTAL(table1, VAddr) = paddr | 0x13;
// Zero the freshly-mapped 4KiB of coarse-table entries
140 memset( (void*)&table1[ (VAddr >> 12) & ~(1024-1) ], 0, 0x1000 );
// Write a page mapping described by *pi at VAddr. Dispatches on pi->Size
// (12 = 4KiB small page, 16 = 64KiB large page, 20 = 1MiB section, 24 = 16MiB
// supersection); only small pages and supersections are implemented here.
// Returns via LEAVE_RET: nonzero on error (e.g. would overwrite a large page).
146 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
148 Uint32 *table0, *table1;
151 ENTER("pVAddr ppi", VAddr, pi);
153 MM_int_GetTables(VAddr, &table0, &table1);
155 desc = &table0[ VAddr >> 20 ];
156 LOG("desc = %p", desc);
160 case 12: // Small Page
161 case 16: // Large Page
// No coarse table for this region yet - allocate one first
163 if( (*desc & 3) == 0 ) {
164 MM_int_AllocateCoarse( VAddr, pi->Domain );
166 desc = &table1[ VAddr >> 12 ];
167 LOG("desc (2) = %p", desc);
171 // - Error if overwriting a large page
172 if( (*desc & 3) == 1 ) LEAVE_RET('i', 1);
// PhysAddr == 0 means "unmap": (gapped) clear the entry, then clean the cache line
173 if( pi->PhysAddr == 0 ) {
176 DCCMVAC( (tVAddr) desc );
177 // #warning "HACK: TLBIALL"
// Build the small-page descriptor: base | type 2, then flag bits
183 *desc = (pi->PhysAddr & 0xFFFFF000) | 2;
184 if(!pi->bExecutable) *desc |= 1; // XN
185 if(!pi->bGlobal) *desc |= 1 << 11; // nG
186 if( pi->bShared) *desc |= 1 << 10; // S
187 *desc |= (pi->AP & 3) << 4; // AP
188 *desc |= ((pi->AP >> 2) & 1) << 9; // APX
190 // #warning "HACK: TLBIALL"
192 DCCMVAC( (tVAddr) desc );
199 Log_Warning("MMVirt", "TODO: Implement large pages in MM_int_SetPageInfo");
202 case 20: // Section or unmapped
203 Log_Warning("MMVirt", "TODO: Implement sections in MM_int_SetPageInfo");
205 case 24: // Supersection
206 // Error if not aligned
207 if( VAddr & 0xFFFFFF ) {
// Only write if the slot is unmapped or already a supersection (bit 18 set on type 2)
211 if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) )
213 if( pi->PhysAddr == 0 ) {
// Supersection descriptor: 16MiB-aligned base | type 2 | bit 18
// (extended >32-bit physical address support left commented out)
218 *desc = pi->PhysAddr & 0xFF000000;
219 // *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20;
220 // *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5;
221 *desc |= 2 | (1 << 18);
223 // TODO: Apply to all entries
224 Log_Warning("MMVirt", "TODO: Apply changes to all entries of supersections");
229 Log_Warning("MMVirt", "TODO: 24-bit not on supersection?");
// Read the mapping of VAddr into *pi (physical address, AP, domain, XN/nG/S
// flags, size). Walks level 0 then, for coarse tables, level 1.
// Return value (gapped here) is nonzero when the address is unmapped.
238 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
240 Uint32 *table0, *table1;
243 // LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi);
245 MM_int_GetTables(VAddr, &table0, &table1);
247 desc = table0[ VAddr >> 20 ];
249 // if( VAddr > 0x90000000)
250 // LOG("table0 desc(%p) = %x", &table0[ VAddr >> 20 ], desc);
266 // 1: Coarse page table
268 // Domain from top level table
269 pi->Domain = (desc >> 5) & 7;
271 desc = table1[ VAddr >> 12 ];
272 // LOG("table1 desc(%p) = %x", &table1[ VAddr >> 12 ], desc);
279 // 1: Large Page (64KiB)
282 pi->PhysAddr = desc & 0xFFFF0000;
283 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
284 pi->bExecutable = !(desc & 0x8000);
// NOTE(review): large-page branch does not read nG into pi->bGlobal - confirm intended
285 pi->bShared = (desc >> 10) & 1;
// (2/3: Small page, 4KiB - extract base and flag bits)
291 pi->PhysAddr = desc & 0xFFFFF000;
292 pi->bExecutable = !(desc & 1);
// FIX: was !(desc >> 11), which is zero for any mapped page (physical address
// bits sit above bit 11), so bGlobal was effectively always false. Only the
// nG bit (bit 11) determines global vs non-global.
293 pi->bGlobal = !(desc & (1 << 11));
294 pi->bShared = (desc >> 10) & 1;
295 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
300 // 2: Section (or Supersection)
302 if( desc & (1 << 18) ) {
// Supersection: 16MiB base plus extended (>32-bit) address bits
304 pi->PhysAddr = desc & 0xFF000000;
305 pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32;
306 pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36;
308 pi->Domain = 0; // Supersections default to zero
309 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
// Section (1MiB)
// NOTE(review): mask 0xFFF80000 keeps bit 19, which is not part of a section base
// (base is bits 31:20) - confirm whether 0xFFF00000 was intended
314 pi->PhysAddr = desc & 0xFFF80000;
316 pi->Domain = (desc >> 5) & 7;
317 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
320 // 3: Reserved (invalid)
// Translate a virtual pointer to its physical address (0 on unmapped, per the
// gapped early-return). Combines the mapping's base with the in-page offset.
331 tPAddr MM_GetPhysAddr(const void *Ptr)
334 if( MM_int_GetPageInfo((tVAddr)Ptr, &pi) )
336 return pi.PhysAddr | ((tVAddr)Ptr & ((1 << pi.Size)-1));
// Translate the hardware AP/XN state of VAddr into generic MM_PFLAG_* flags.
// (Switch over pi.AP is gapped; visible arms map kernel-only APs to
// MM_PFLAG_KERNEL and read-only kernel APs to KERNEL|RO.)
339 Uint MM_GetFlags(tVAddr VAddr)
344 if( MM_int_GetPageInfo(VAddr, &pi) )
354 ret |= MM_PFLAG_KERNEL;
357 ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO;
369 if( pi.bExecutable ) ret |= MM_PFLAG_EXEC;
// Change the generic flags (bits selected by Mask) of an existing mapping,
// recomputing the hardware AP value and rewriting the page entry.
373 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
378 if( MM_int_GetPageInfo(VAddr, &pi) )
// Merge requested flags with current ones; no-op if nothing changes
381 curFlags = MM_GetFlags(VAddr);
382 if( (curFlags & Mask) == Flags )
// COW pages get the read-only-everywhere AP (handled in gapped code above/below)
387 if( curFlags & MM_PFLAG_COW )
// Map KERNEL/RO flag combinations back to an AP code
391 switch(curFlags & (MM_PFLAG_KERNEL|MM_PFLAG_RO) )
394 pi.AP = AP_RW_BOTH; break;
395 case MM_PFLAG_KERNEL:
396 pi.AP = AP_KRW_ONLY; break;
398 pi.AP = AP_RO_USER; break;
399 case MM_PFLAG_KERNEL|MM_PFLAG_RO:
400 pi.AP = AP_KRO_ONLY; break;
404 pi.bExecutable = !!(curFlags & MM_PFLAG_EXEC);
406 MM_int_SetPageInfo(VAddr, &pi);
// Check that every page of [Addr, Addr+Size) is mapped, and (for user buffers)
// that no page is kernel-only. Returns 0 on any unmapped/forbidden page.
409 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
// Normalise to whole pages starting at the containing page boundary
414 Size += Addr & (PAGE_SIZE-1);
415 Addr &= ~(PAGE_SIZE-1);
417 if( MM_int_GetPageInfo(Addr, &pi) ) return 0;
// First page decides whether this is a user buffer (gapped: sets bUser)
420 if(pi.AP != AP_KRW_ONLY && pi.AP != AP_KRO_ONLY)
423 while( Size >= PAGE_SIZE )
425 if( MM_int_GetPageInfo(Addr, &pi) )
// A user buffer must not straddle into kernel-only pages
427 if(bUser && (pi.AP == AP_KRW_ONLY || pi.AP == AP_KRO_ONLY))
// Map the physical page PAddr at VAddr as a 4KiB kernel R/W page
// (user addresses get a different AP in the gapped branch).
436 int MM_Map(tVAddr VAddr, tPAddr PAddr)
438 tMM_PageInfo pi = {0};
440 Log("MM_Map %P=>%p", PAddr, VAddr);
445 if(VAddr < USER_STACK_TOP)
448 pi.AP = AP_KRW_ONLY; // Kernel Read/Write
450 if( MM_int_SetPageInfo(VAddr, &pi) ) {
451 // MM_DerefPhys(pi.PhysAddr);
// Allocate a fresh physical page and map it at VAddr.
// Returns the physical address, or 0 on allocation/mapping failure
// (the physical page is dereferenced again if the mapping fails).
457 tPAddr MM_Allocate(tVAddr VAddr)
459 tMM_PageInfo pi = {0};
461 ENTER("pVAddr", VAddr);
463 pi.PhysAddr = MM_AllocPhys();
464 if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0);
// User addresses get user-accessible AP in the gapped branch
466 if(VAddr < USER_STACK_TOP)
471 if( MM_int_SetPageInfo(VAddr, &pi) ) {
472 MM_DerefPhys(pi.PhysAddr);
476 LEAVE('x', pi.PhysAddr);
// Map the shared zero page at VAddr copy-on-write. The first call allocates
// and zeroes the global zero page; later calls just take a reference and map it.
// NOTE(review): the giMM_ZeroPage check-then-allocate is not obviously
// synchronised - confirm callers hold a lock if this can race.
480 tPAddr MM_AllocateZero(tVAddr VAddr)
482 if( !giMM_ZeroPage ) {
483 giMM_ZeroPage = MM_Allocate(VAddr);
484 MM_RefPhys(giMM_ZeroPage);
485 memset((void*)VAddr, 0, PAGE_SIZE);
488 MM_RefPhys(giMM_ZeroPage);
489 MM_Map(VAddr, giMM_ZeroPage);
// Mark COW so the first write faults and duplicates the page
491 MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
492 return giMM_ZeroPage;
// Unmap VAddr and drop one reference on its backing physical page.
// No-op for unmapped addresses or zero physical addresses.
495 void MM_Deallocate(tVAddr VAddr)
499 if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
500 if( pi.PhysAddr == 0 ) return;
501 MM_DerefPhys(pi.PhysAddr);
// (gapped: pi.PhysAddr is cleared so SetPageInfo removes the mapping)
506 MM_int_SetPageInfo(VAddr, &pi);
// Allocate a level-0 root translation table: two contiguous physical pages,
// which must be 8KiB-aligned. If the first 2-page allocation is misaligned,
// retry with 3 pages and trim the page on the wrong side.
509 tPAddr MM_AllocateRootTable(void)
513 ret = MM_AllocPhysRange(2, -1);
// Misaligned: free the pair and over-allocate to guarantee an aligned window
516 MM_DerefPhys(ret+0x1000);
517 ret = MM_AllocPhysRange(3, -1);
521 // Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
524 MM_DerefPhys(ret + 0x2000);
525 // Log("MM_AllocateRootTable: Second try aligned, %P", ret);
529 // Log("MM_AllocateRootTable: Got it in one, %P", ret);
// Clone one 4MiB block of coarse page tables (block index `Table`) into a new
// physical page, writing the four resulting level-0 entries into DestEnt[0..3].
// User R/W pages are marked COW in both source and copy; kernel-writable pages
// are deep-copied.
533 void MM_int_CloneTable(Uint32 *DestEnt, int Table)
537 Uint32 *cur = (void*)MM_TABLE1USER;
538 // Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
541 table = MM_AllocPhys();
546 tmp_map = MM_MapTemp(table);
548 for( i = 0; i < 1024; i ++ )
550 // Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]);
552 case 0: tmp_map[i] = 0; break;
556 Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (Table*256+i)*0x1000);
563 // Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230);
// AP/APX bits == kernel R/W: cannot COW, take a private copy of the page
564 if( (cur[i] & 0x230) == 0x010 )
568 newpage = MM_AllocPhys();
569 src = (void*)( (Table*256+i)*0x1000 );
570 dst = MM_MapTemp(newpage);
571 // Debug("Taking a copy of kernel page %p (%P)", src, cur[i] & ~0xFFF);
572 memcpy(dst, src, PAGE_SIZE);
// New page inherits the original's flag bits
574 tmp_map[i] = newpage | (cur[i] & 0xFFF);
// User R/W: downgrade BOTH copies to read-only so first write faults (COW)
578 if( (cur[i] & 0x230) == 0x030 )
579 cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
581 MM_RefPhys( tmp_map[i] & ~0xFFF );
586 MM_FreeTemp( tmp_map );
// The new page holds four 1KiB coarse tables; type 1 level-0 descriptors
588 DestEnt[0] = table + 0*0x400 + 1;
589 DestEnt[1] = table + 1*0x400 + 1;
590 DestEnt[2] = table + 2*0x400 + 1;
591 DestEnt[3] = table + 3*0x400 + 1;
// Clone the current user address space for fork(): duplicate/COW all user
// coarse tables, rebuild the fractal self-mapping, and deep-copy the current
// kernel stack. Returns the physical address of the new root table.
594 tPAddr MM_Clone(void)
597 Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
601 // MM_DumpTables(0, KERNEL_BASE);
603 ret = MM_AllocateRootTable();
605 cur = (void*)MM_TABLE0USER;
// Root table spans two pages; map both via temp mappings
606 new_lvl1_1 = MM_MapTemp(ret);
607 new_lvl1_2 = MM_MapTemp(ret+0x1000);
608 tmp_map = new_lvl1_1;
// Walk all user level-0 entries except the final 4 (fractal block)
609 for( i = 0; i < 0x800-4; i ++ )
611 // HACK! Ignore the original identity mapping
612 if( i == 0 && Threads_GetTID() == 0 ) {
// Past 0x400 entries, indices continue in the second page; this pointer
// offset keeps `tmp_map[i]` valid for i >= 0x400
617 tmp_map = &new_lvl1_2[-0x400];
620 case 0: tmp_map[i] = 0; break;
// Coarse table: clone the whole 4-entry (4MiB) block at once
622 MM_int_CloneTable(&tmp_map[i], i);
623 i += 3; // Tables are alocated in blocks of 4
627 Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
633 // Allocate Fractal table
636 tPAddr tmp = MM_AllocPhys();
637 Uint32 *table = MM_MapTemp(tmp);
639 register Uint32 __SP asm("sp");
641 // Map table to last 4MiB of user space
642 new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
643 new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
644 new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
645 new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
// First 512 entries: self-map every coarse-table page (kernel-only, XN, nG)
647 tmp_map = new_lvl1_1;
648 for( j = 0; j < 512; j ++ )
651 tmp_map = &new_lvl1_2[-0x400];
652 if( (tmp_map[j*4] & 3) == 1 )
654 table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
655 table[j] |= 0x813; // nG, Kernel Only, Small page, XN
// Next two entries map the root table itself; remainder cleared (gapped)
661 table[j++] = (ret + 0x0000) | 0x813;
662 table[j++] = (ret + 0x1000) | 0x813;
664 for( ; j < 1024; j ++ )
667 // Get kernel stack bottom
668 sp = __SP & ~(MM_KSTACK_SIZE-1);
669 j = (sp / 0x1000) % 1024;
670 num = MM_KSTACK_SIZE/0x1000;
672 // Log("num = %i, sp = %p, j = %i", num, sp, j);
// Deep-copy each page of the current kernel stack into the clone
675 for(; num--; j ++, sp += 0x1000)
680 page = MM_AllocPhys();
681 // Log("page = %P", page);
682 table[j] = page | 0x813;
684 tmp_page = MM_MapTemp(page);
685 memcpy(tmp_page, (void*)sp, 0x1000);
686 MM_FreeTemp( tmp_page );
689 MM_FreeTemp( table );
692 MM_FreeTemp( new_lvl1_1 );
693 MM_FreeTemp( new_lvl1_2 );
695 // Log("MM_Clone: ret = %P", ret);
// Tear down the current user address space (exec/exit): dereference every
// mapped user page and its coarse tables, then free every kernel stack in the
// final fractal block except the one currently in use.
700 void MM_ClearUser(void)
703 const int user_table_count = USER_STACK_TOP / (256*0x1000);
704 Uint32 *cur = (void*)MM_TABLE0USER;
707 // MM_DumpTables(0, 0x80000000);
709 // Log("user_table_count = %i (as opposed to %i)", user_table_count, 0x800-4);
711 for( i = 0; i < user_table_count; i ++ )
715 case 0: break; // Already unmapped
// Coarse table: deref every mapped small page, then the table page itself
717 tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
718 for( j = 0; j < 1024; j ++ )
722 case 0: break; // Unmapped
724 Log_Error("MMVirt", "TODO: Support large pages in MM_ClearUser");
728 MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) );
732 MM_DerefPhys( cur[i] & ~(PAGE_SIZE-1) );
740 Log_Error("MMVirt", "TODO: Implement sections/supersections in MM_ClearUser");
746 // Final block of 4 tables are KStack
749 // Clear out unused stacks
751 register Uint32 __SP asm("sp");
752 int cur_stack_base = ((__SP & ~(MM_KSTACK_SIZE-1)) / PAGE_SIZE) % 1024;
754 tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
756 // First 512 is the Table1 mapping + 2 for Table0 mapping
757 for( j = 512+2; j < 1024; j ++ )
759 // Skip current stack
760 if( j == cur_stack_base ) {
761 j += (MM_KSTACK_SIZE / PAGE_SIZE) - 1;
764 if( !(tab[j] & 3) ) continue;
765 ASSERT( (tab[j] & 3) == 2 );
// FIX: was `~(PAGE_SIZE)`, which clears only bit 12 and leaves the
// descriptor's flag bits (0-11) in the address passed to MM_DerefPhys.
// Use the same ~(PAGE_SIZE-1) mask as the deref calls above.
766 MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) );
772 // MM_DumpTables(0, 0x80000000);
// Map PAddr into the first free slot of the temporary-mapping window and
// return its virtual address; logs a warning (and, gapped, returns failure)
// when the window is exhausted.
775 void *MM_MapTemp(tPAddr PAddr)
780 for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
// A slot that has page info is occupied; keep scanning
782 if( MM_int_GetPageInfo(ret, &pi) == 0 )
785 // Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0));
786 MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp
791 Log_Warning("MMVirt", "MM_MapTemp: All slots taken");
// Release a temporary mapping obtained from MM_MapTemp.
// Warns (and, gapped, returns early) for pointers outside the temp window.
795 void MM_FreeTemp(void *Ptr)
797 tVAddr VAddr = (tVAddr)Ptr;
798 if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) {
799 Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr);
// Drops the reference MM_MapTemp took and unmaps the slot
803 MM_Deallocate(VAddr);
// Map NPages of physically-contiguous hardware memory starting at PAddr into
// the hardware-mapping window; returns the virtual base, warning on no space.
806 tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages)
812 ENTER("xPAddr iNPages", PAddr, NPages);
814 // Scan for a location
815 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE )
817 // LOG("checking %p", ret);
818 // Check if there is `NPages` free pages
819 for( i = 0; i < NPages; i ++ )
821 if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 )
824 // Nope, jump to after the used page found and try again
825 // LOG("i = %i, ==? %i", i, NPages);
827 ret += i * PAGE_SIZE;
// Found a run of NPages free slots - map them all
832 for( i = 0; i < NPages; i ++ )
833 MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE);
838 Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages);
// Allocate `Pages` physically-contiguous pages below the 2^MaxBits address
// limit (for DMA-capable devices) and map them; physical base returned via
// *PAddr (in gapped code), virtual base as the return value.
843 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr)
848 phys = MM_AllocPhysRange(Pages, MaxBits);
850 Log_Warning("MMVirt", "No space left for a %i page block (MM_AllocDMA)", Pages);
854 ret = MM_MapHWPages(phys, Pages);
// Unmap a hardware mapping created by MM_MapHWPages - not yet implemented.
860 void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)
862 Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages");
// Allocate a new kernel stack: pick the shared-global or per-process stack
// region, find a free MM_KSTACK_SIZE-aligned slot, and allocate all pages
// except the first (left unmapped as a guard page). On failure, already
// allocated pages are rolled back.
865 tVAddr MM_NewKStack(int bShared)
867 tVAddr min_addr, max_addr;
871 min_addr = MM_GLOBALSTACKS;
872 max_addr = MM_GLOBALSTACKS_END;
875 min_addr = MM_KSTACK_BASE;
876 max_addr = MM_KSTACK_END;
879 // Locate a free slot
// A slot is free when its top page is unmapped
880 for( addr = min_addr; addr < max_addr; addr += MM_KSTACK_SIZE )
883 if( MM_int_GetPageInfo(addr+MM_KSTACK_SIZE-PAGE_SIZE, &pi) ) break;
886 // Check for an error
887 if(addr >= max_addr) {
// Allocate pages; start at PAGE_SIZE so the bottom page stays a guard page
892 for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE )
894 if( MM_Allocate(addr + ofs) == 0 )
// Allocation failed partway: unwind what was allocated
899 MM_Deallocate(addr + ofs);
901 Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate");
// Create the initial user stack at the top of user space: committed pages
// (MM_Allocate) for the top USER_STACK_COMM bytes, COW zero pages below,
// with the bottom page left unmapped as a guard. Pages are made
// user-accessible at the end. Rolls back on allocation failure.
908 tVAddr MM_NewUserStack(void)
912 addr = USER_STACK_TOP - USER_STACK_SIZE;
913 if( MM_GetPhysAddr( (void*)(addr + PAGE_SIZE) ) ) {
914 Log_Error("MMVirt", "Unable to create initial user stack, addr %p taken",
// Skip ofs==0: bottom page is the guard page
921 for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE )
// Top USER_STACK_COMM bytes are committed; the rest are lazily-zeroed COW
924 if(ofs >= USER_STACK_SIZE - USER_STACK_COMM)
925 rv = MM_Allocate(addr + ofs);
927 rv = MM_AllocateZero(addr + ofs);
// Allocation failed: unwind
933 MM_Deallocate(addr + ofs);
935 Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate");
// Strip the KERNEL flag so user mode can access the stack
938 MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL);
940 // Log("Return %p", addr + ofs);
941 // MM_DumpTables(0, 0x80000000);
// Debug-print one coalesced mapping range. The zero page gets a special
// format (first branch); otherwise the physical base is recovered as
// PhysAddr-Len because the caller advanced PhysAddr while walking the range.
945 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info)
947 if( giMM_ZeroPage && Info->PhysAddr == giMM_ZeroPage )
949 Debug("%p => %8s - 0x%7x D%i %x %s %s",
951 Info->Domain, Info->AP,
952 Info->bExecutable ? " X" : "nX",
953 Info->bGlobal ? " G" : "nG"
958 Debug("%p => %8x - 0x%7x D%i %x %s %s",
959 Start, Info->PhysAddr-Len, Len,
960 Info->Domain, Info->AP,
961 Info->bExecutable ? " X" : "nX",
962 Info->bGlobal ? " G" : "nG"
// Dump all mappings in [Start, End), coalescing physically- and
// attribute-contiguous pages into single output lines via MM_int_DumpTableEnt.
967 void MM_DumpTables(tVAddr Start, tVAddr End)
969 tVAddr range_start = 0, addr;
970 tMM_PageInfo pi, pi_old;
971 int i = 0, inRange=0;
973 memset(&pi_old, 0, sizeof(pi_old));
975 Debug("Page Table Dump (%p to %p):", Start, End);
// `i == 0` admits the first iteration; `addr &&` guards against wrap-around
977 for( addr = Start; i == 0 || (addr && addr < End); i = 1 )
980 // Log("addr = %p", addr);
981 rv = MM_int_GetPageInfo(addr, &pi);
// Flush the accumulated range when any attribute breaks contiguity
983 || pi.Size != pi_old.Size
984 || pi.Domain != pi_old.Domain
985 || pi.AP != pi_old.AP
986 || pi.bGlobal != pi_old.bGlobal
987 || pi_old.PhysAddr != pi.PhysAddr )
990 MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old);
// Align to the mapping's natural size before advancing
992 addr &= ~((1 << pi.Size)-1);
997 // Handle the zero page
// The shared zero page repeats physically, so don't advance PhysAddr for it
998 if( !giMM_ZeroPage || pi_old.Size != 12 || pi_old.PhysAddr != giMM_ZeroPage )
999 pi_old.PhysAddr += 1 << pi_old.Size;
1000 addr += 1 << pi_old.Size;
1001 inRange = (rv == 0);
// Flush the final pending range
1004 MM_int_DumpTableEnt(range_start, addr - range_start, &pi);
// Data/prefetch abort handler. Resolves copy-on-write faults (AP_RO_BOTH):
// zero-page COW re-zeroes a fresh page, shared COW deep-copies, and a
// sole-reference COW just upgrades permissions in place. Anything else is a
// genuine fault and gets a detailed diagnostic dump.
1008 // NOTE: Runs in abort context, not much difference, just a smaller stack
1009 void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch, Uint32 UserLR)
1014 rv = MM_int_GetPageInfo(Addr, &pi);
// AP_RO_BOTH is the COW marker (see MM_SetFlags / MM_int_CloneTable)
1017 if( rv == 0 && pi.AP == AP_RO_BOTH )
// Case 1: faulting page is the shared zero page - replace with a fresh zeroed page
1020 if( giMM_ZeroPage && pi.PhysAddr == giMM_ZeroPage )
1023 newpage = MM_AllocPhys();
1025 Log_Error("MMVirt", "Unable to allocate new page for COW of ZERO");
1030 Log_Notice("MMVirt", "COW %p caused by %p, ZERO duped to %P (RefCnt(%i)--)", Addr, PC,
1031 newpage, MM_GetRefCount(pi.PhysAddr));
1034 MM_DerefPhys(pi.PhysAddr);
1035 pi.PhysAddr = newpage;
// Remap writable first, then zero through the (now-writable) virtual address
1037 MM_int_SetPageInfo(Addr, &pi);
1039 memset( (void*)(Addr & ~(PAGE_SIZE-1)), 0, PAGE_SIZE );
// Case 2: page shared with another address space - duplicate it
1043 else if( MM_GetRefCount(pi.PhysAddr) > 1 )
1045 // Duplicate the page
1049 newpage = MM_AllocPhys();
1051 Log_Error("MMVirt", "Unable to allocate new page for COW");
1054 dst = MM_MapTemp(newpage);
1055 src = (void*)(Addr & ~(PAGE_SIZE-1));
1056 memcpy( dst, src, PAGE_SIZE );
1060 Log_Notice("MMVirt", "COW %p caused by %p, %P duped to %P (RefCnt(%i)--)", Addr, PC,
1061 pi.PhysAddr, newpage, MM_GetRefCount(pi.PhysAddr));
1064 MM_DerefPhys(pi.PhysAddr);
1065 pi.PhysAddr = newpage;
// Case 3: last reference - no copy needed, just make it writable (gapped)
1069 Log_Notice("MMVirt", "COW %p caused by %p, took last reference to %P",
1070 Addr, PC, pi.PhysAddr);
1075 MM_int_SetPageInfo(Addr, &pi);
// Not a COW fault: emit a full diagnostic dump and the page tables
1080 Log_Error("MMVirt", "Code at %p accessed %p (DFSR = 0x%x)%s", PC, Addr, DFSR,
1081 (bPrefetch ? " - Prefetch" : "")
1083 Log_Error("MMVirt", "- User LR = 0x%x", UserLR);
// DFSR fault-status decode table, indexed by {FS[4], FS[3:0]}
1084 const char * const dfsr_errors[] = {
1085 /* 00000 */ "-", "Alignment Fault",
1086 /* 00010 */ "Debug event", "Access Flag (Section)",
1087 /* 00100 */ "Instr Cache Maint", "Translation (Section)",
1088 /* 00110 */ "Access Flag (Page)", "Translation (Page)",
1089 /* 01000 */ "Sync. External abort", "Domain (Section)",
1090 /* 01010 */ "-", "Domain (Page)",
1091 /* 01100 */ "Table Walk sync ext (lvl 1)", "Permission (Section)",
1092 /* 01110 */ "Table Walk sync ext (lvl 2)", "Permission (Page)",
1094 /* 10000 */ "-", "-",
1095 /* 10010 */ "-", "-",
1096 /* 10100 */ "IMPL (Lockdown)", "-",
1097 /* 10110 */ "Async. Extern. Abort", "-",
1098 /* 11000 */ "Mem. access async pairity error", "Mem. access async pairity error",
1099 /* 11010 */ "IMPL (Coprocessor abort)", "-",
1100 /* 11100 */ "Table Walk Sync parity (lvl 1)", "-",
1101 /* 11110 */ "Table Walk Sync parity (lvl 2)", "-"
// FS[4] lives in DFSR bit 10; FS[3:0] in bits 3:0
1103 int errcode = (DFSR & 0xF) | (((DFSR >> 10) & 1) << 4);
1104 Log_Error("MMVirt", "- Errcode 0b%05b", errcode);
1105 Log_Error("MMVirt", "- Dom %i %s %s",
1106 (DFSR >> 4) & 0xF, (DFSR & 0x800 ? "Write": "Read"),
1107 dfsr_errors[errcode]
1109 Log_Error("MMVirt", "- AP=%i(%s) %s", pi.AP, caAPValueNames[pi.AP], pi.bExecutable ? " Executable":"");
1110 if( Addr < 0x80000000 )
1111 MM_DumpTables(0, 0x80000000);
1113 MM_DumpTables(0x80000000, -1);