4 * ARM7 Virtual Memory Manager
5 * - arch/arm7/mm_virt.c
// Access-permission (AP) values used throughout this file. The low two bits
// are the ARM AP[1:0] field and bit 2 is APX — MM_int_GetPageInfo reassembles
// them the same way: ((desc >> 4) & 3) | (APX << 2) for small pages.
14 #define AP_KRW_ONLY 1 // Kernel page
15 #define AP_KRO_ONLY 5 // Kernel RO page
16 #define AP_RW_BOTH 3 // Standard RW
17 #define AP_RO_BOTH 7 // COW Page
18 #define AP_RO_USER 2 // User RO Page
// Physical-address mask for a coarse-table pointer in a level-1 descriptor
// (coarse tables are 1KiB-aligned, hence the low 10 bits are flags).
19 #define PADDR_MASK_LVL1 0xFFFFFC00
// Kernel level-0 translation table, defined outside this file (boot code).
22 extern Uint32 kernel_table0[];
36 //#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)])
// Fractal/self-mapping slot for `addr` inside a level-1 table view — used to
// map page tables into themselves so they can be edited via virtual addresses.
// NOTE(review): the >>20 variant above is kept commented out; confirm >>22 is
// the intended (4MiB-per-slot) stride.
37 #define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)])
38 #define USRFRACTAL(addr) (*((Uint32*)(0x7FDFF000) + ((addr)>>22)))
// TLB maintenance via CP15 c8,c7: invalidate entire unified TLB, and
// invalidate a single entry by modified virtual address.
39 #define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0))
40 #define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1" : : "r" (addr))
43 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1);
44 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain);
45 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
46 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
47 tVAddr MM_NewUserStack(void);
48 tPAddr MM_AllocateZero(tVAddr VAddr);
49 tPAddr MM_AllocateRootTable(void);
50 void MM_int_CloneTable(Uint32 *DestEnt, int Table);
51 tPAddr MM_Clone(void);
52 tVAddr MM_NewKStack(int bGlobal);
53 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info);
54 //void MM_DumpTables(tVAddr Start, tVAddr End);
55 void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch);
// Initialise the virtual memory subsystem. (Body is elided in this listing.)
61 int MM_InitialiseVirtual(void)
// Select the level-0/level-1 translation tables covering VAddr.
// Addresses with the top bit set (>= 0x80000000) are kernel space and use the
// shared kernel tables; everything below uses the per-process user tables.
// NOTE(review): listing is elided — the else/braces for the user branch are
// not visible here.
66 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1)
68 if(VAddr & 0x80000000) {
69 *Table0 = (void*)&kernel_table0; // Level 0
70 *Table1 = (void*)MM_TABLE1KERN; // Level 1
73 *Table0 = (void*)MM_TABLE0USER;
74 *Table1 = (void*)MM_TABLE1USER;
// Allocate one physical page holding four 1KiB coarse page tables, covering a
// 4MiB-aligned block around VAddr, and install the four level-0 descriptors.
// Returns non-zero on failure (error paths elided in this listing).
78 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain)
80 Uint32 *table0, *table1;
84 ENTER("xVAddr iDomain", VAddr, Domain);
86 MM_int_GetTables(VAddr, &table0, &table1);
88 VAddr &= ~(0x400000-1); // 4MiB per "block" (one page = four 1KiB coarse tables)
90 desc = &table0[ VAddr>>20];
91 LOG("desc = %p", desc);
93 // table0: 4 bytes = 1 MiB
95 LOG("desc[0] = %x", desc[0]);
96 LOG("desc[1] = %x", desc[1]);
97 LOG("desc[2] = %x", desc[2]);
98 LOG("desc[3] = %x", desc[3]);
// Bail if any of the four 1MiB slots is already mapped (type bits != 0).
100 if( (desc[0] & 3) != 0 || (desc[1] & 3) != 0
101 || (desc[2] & 3) != 0 || (desc[3] & 3) != 0 )
108 paddr = MM_AllocPhys();
// Coarse-table descriptor: type 01, domain in bits 5-8; the page holds four
// consecutive 1KiB tables at +0x000/+0x400/+0x800/+0xC00.
116 *desc = paddr | (Domain << 5) | 1;
117 desc[1] = desc[0] + 0x400;
118 desc[2] = desc[0] + 0x800;
119 desc[3] = desc[0] + 0xC00;
// Expose the new tables through the fractal mapping so they can be edited
// through virtual addresses (user vs kernel fractal region).
121 if( VAddr < 0x80000000 ) {
122 USRFRACTAL(VAddr) = paddr | 0x13;
125 FRACTAL(table1, VAddr) = paddr | 0x13;
// Write a translation entry for VAddr from the supplied tMM_PageInfo.
// pi->Size is log2 of the mapping size: 12 = 4KiB small page, 16 = 64KiB
// large page, 20 = 1MiB section, 24 = 16MiB supersection.
// Returns non-zero on error (several paths elided in this listing).
135 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
137 Uint32 *table0, *table1;
140 ENTER("pVAddr ppi", VAddr, pi);
142 MM_int_GetTables(VAddr, &table0, &table1);
144 desc = &table0[ VAddr >> 20 ];
145 LOG("desc = %p", desc);
149 case 12: // Small Page
150 case 16: // Large Page
// No coarse table here yet — allocate one before touching level 1.
152 if( (*desc & 3) == 0 ) {
153 MM_int_AllocateCoarse( VAddr, pi->Domain );
155 desc = &table1[ VAddr >> 12 ];
156 LOG("desc (2) = %p", desc);
160 // - Error if overwriting a large page
161 if( (*desc & 3) == 1 ) LEAVE_RET('i', 1);
// PhysAddr == 0 means "unmap" (clear path elided in this listing).
162 if( pi->PhysAddr == 0 ) {
// Build a small-page descriptor: base | type 10, then permission bits.
168 *desc = (pi->PhysAddr & 0xFFFFF000) | 2;
169 if(!pi->bExecutable) *desc |= 1; // XN
170 if(!pi->bGlobal) *desc |= 1 << 11; // nG
171 if( pi->bShared) *desc |= 1 << 10; // S
172 *desc |= (pi->AP & 3) << 4; // AP
173 *desc |= ((pi->AP >> 2) & 1) << 9; // APX
// Flush the stale TLB entry for this page only.
174 TLBIMVA(VAddr & 0xFFFFF000);
181 Log_Warning("MMVirt", "TODO: Implement large pages in MM_int_SetPageInfo");
184 case 20: // Section or unmapped
185 Log_Warning("MMVirt", "TODO: Implement sections in MM_int_SetPageInfo");
187 case 24: // Supersection
188 // Error if not aligned to the 16MiB supersection size
189 if( VAddr & 0xFFFFFF ) {
// Only proceed if slot is unmapped, or already a supersection (bit 18 set).
193 if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) )
195 if( pi->PhysAddr == 0 ) {
// Supersection descriptor: 16MiB base | type 10 | bit 18.
200 *desc = pi->PhysAddr & 0xFF000000;
201 // *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20;
202 // *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5;
203 *desc |= 2 | (1 << 18);
205 // TODO: Apply to all entries
206 Log_Warning("MMVirt", "TODO: Apply changes to all entries of supersections");
211 Log_Warning("MMVirt", "TODO: 24-bit not on supersection?");
// Decode the translation entry for VAddr into *pi (physical address, AP,
// domain, flags, size). Returns non-zero when the address is unmapped
// (return paths elided in this listing).
220 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
222 Uint32 *table0, *table1;
225 // LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi);
227 MM_int_GetTables(VAddr, &table0, &table1);
229 desc = table0[ VAddr >> 20 ];
231 // if( VAddr > 0x90000000)
232 // LOG("table0 desc(%p) = %x", &table0[ VAddr >> 20 ], desc);
248 // 1: Coarse page table
250 // Domain from top level table
251 pi->Domain = (desc >> 5) & 7;
// Descend to the level-1 (coarse) entry for this 4KiB slot.
253 desc = table1[ VAddr >> 12 ];
254 // LOG("table1 desc(%p) = %x", &table1[ VAddr >> 12 ], desc);
261 // 1: Large Page (64KiB)
264 pi->PhysAddr = desc & 0xFFFF0000;
// AP = AP[1:0] (bits 4-5) with APX (bit 9) as bit 2; XN is bit 15 for large pages.
265 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
266 pi->bExecutable = !(desc & 0x8000);
267 pi->bShared = (desc >> 10) & 1;
// Small page (4KiB): XN is bit 0, nG bit 11, S bit 10.
273 pi->PhysAddr = desc & 0xFFFFF000;
274 pi->bExecutable = !(desc & 1);
// NOTE(review): probable bug — any bit above 11 (including address bits)
// makes this false; likely intended !((desc >> 11) & 1). Confirm and fix.
275 pi->bGlobal = !(desc >> 11);
276 pi->bShared = (desc >> 10) & 1;
277 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
282 // 2: Section (or Supersection)
284 if( desc & (1 << 18) ) {
// Supersection: 16MiB base plus extended (>32-bit) address bits.
286 pi->PhysAddr = desc & 0xFF000000;
287 pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32;
288 pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36;
290 pi->Domain = 0; // Supersections default to zero
291 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
// Plain 1MiB section.
// NOTE(review): mask 0xFFF80000 keeps bit 19, but a section base is bits
// 31:20 (mask 0xFFF00000) — confirm against the elided Size assignment.
296 pi->PhysAddr = desc & 0xFFF80000;
298 pi->Domain = (desc >> 5) & 7;
299 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
302 // 3: Reserved (invalid)
// Translate a virtual address to its physical address; the failure path for
// unmapped addresses is elided in this listing.
313 tPAddr MM_GetPhysAddr(tVAddr VAddr)
316 if( MM_int_GetPageInfo(VAddr, &pi) )
// pi.Size is log2 of the mapping size, so this ORs in the in-page offset.
318 return pi.PhysAddr | (VAddr & ((1 << pi.Size)-1));
// Build the MM_PFLAG_* flag set for VAddr from its page info.
// The AP-value dispatch between these branches is elided in this listing.
321 Uint MM_GetFlags(tVAddr VAddr)
326 if( MM_int_GetPageInfo(VAddr, &pi) )
336 ret |= MM_PFLAG_KERNEL;
339 ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO;
351 if( pi.bExecutable ) ret |= MM_PFLAG_EXEC;
// Update the protection flags of VAddr: bits in Mask are replaced by the
// corresponding bits of Flags, other flags are preserved. No-op when the
// masked flags already match.
355 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
360 if( MM_int_GetPageInfo(VAddr, &pi) )
363 curFlags = MM_GetFlags(VAddr);
364 if( (curFlags & Mask) == Flags )
// COW pages get special handling (elided in this listing).
369 if( curFlags & MM_PFLAG_COW )
// Map the KERNEL/RO flag combination back to an ARM AP value.
373 switch(curFlags & (MM_PFLAG_KERNEL|MM_PFLAG_RO) )
376 pi.AP = AP_RW_BOTH; break;
377 case MM_PFLAG_KERNEL:
378 pi.AP = AP_KRW_ONLY; break;
380 pi.AP = AP_RO_USER; break;
381 case MM_PFLAG_KERNEL|MM_PFLAG_RO:
382 pi.AP = AP_KRO_ONLY; break;
386 pi.bExecutable = !!(curFlags & MM_PFLAG_EXEC);
// Write the updated descriptor back.
388 MM_int_SetPageInfo(VAddr, &pi);
// Map the physical page PAddr at virtual address VAddr.
// NOTE(review): the assignment of pi.PhysAddr and the user-space AP branch
// are elided in this listing — presumably pi.PhysAddr = PAddr; confirm.
391 int MM_Map(tVAddr VAddr, tPAddr PAddr)
393 tMM_PageInfo pi = {0};
395 Log("MM_Map %P=>%p", PAddr, VAddr);
// User-space addresses get user-accessible permissions (branch body elided).
400 if(VAddr < USER_STACK_TOP)
403 pi.AP = AP_KRW_ONLY; // Kernel Read/Write
// On failure to install the entry, drop the reference taken on the page.
405 if( MM_int_SetPageInfo(VAddr, &pi) ) {
406 MM_DerefPhys(pi.PhysAddr);
// Allocate a fresh physical page and map it at VAddr.
// Returns the physical address, or 0 on allocation failure.
412 tPAddr MM_Allocate(tVAddr VAddr)
414 tMM_PageInfo pi = {0};
416 ENTER("pVAddr", VAddr);
418 pi.PhysAddr = MM_AllocPhys();
419 if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0);
// User-space addresses get user-accessible permissions (branch body elided).
421 if(VAddr < USER_STACK_TOP)
// If installing the mapping fails, release the page so it is not leaked.
426 if( MM_int_SetPageInfo(VAddr, &pi) ) {
427 MM_DerefPhys(pi.PhysAddr);
431 LEAVE('x', pi.PhysAddr);
// Map the shared zero page at VAddr as copy-on-write.
// The first call lazily allocates giMM_ZeroPage and zero-fills it through the
// fresh mapping; later calls just take another reference and remap it.
435 tPAddr MM_AllocateZero(tVAddr VAddr)
437 if( !giMM_ZeroPage ) {
438 giMM_ZeroPage = MM_Allocate(VAddr);
439 MM_RefPhys(giMM_ZeroPage);
440 memset((void*)VAddr, 0, PAGE_SIZE);
443 MM_RefPhys(giMM_ZeroPage);
444 MM_Map(VAddr, giMM_ZeroPage);
// Mark COW so the first write replaces the shared zero page with a copy.
446 MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
447 return giMM_ZeroPage;
// Unmap VAddr and drop the reference on its backing physical page.
// Silently returns when the address is already unmapped or has no page.
450 void MM_Deallocate(tVAddr VAddr)
454 if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
456 if( pi.PhysAddr == 0 ) return;
457 MM_DerefPhys(pi.PhysAddr);
// Write back a cleared entry (pi is zeroed in the elided lines — confirm).
462 MM_int_SetPageInfo(VAddr, &pi);
// Allocate the physical pages for a new (user) root translation table.
// Tries a 2-page range first; if its alignment is unsuitable (check elided),
// retries with a 3-page range and releases the page that falls outside the
// aligned pair. NOTE(review): the alignment test itself is elided — confirm.
465 tPAddr MM_AllocateRootTable(void)
469 ret = MM_AllocPhysRange(2, -1);
472 MM_DerefPhys(ret+0x1000);
473 ret = MM_AllocPhysRange(3, -1);
477 // Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
480 MM_DerefPhys(ret + 0x2000);
481 // Log("MM_AllocateRootTable: Second try aligned, %P", ret);
485 // Log("MM_AllocateRootTable: Got it in one, %P", ret);
// Clone one 4MiB block of coarse page tables (four 1KiB tables, indexed by
// Table) into a freshly allocated physical page, writing the four resulting
// level-0 descriptors into DestEnt[0..3]. Used by MM_Clone (fork).
489 void MM_int_CloneTable(Uint32 *DestEnt, int Table)
493 Uint32 *cur = (void*)MM_TABLE1USER;
494 // Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
497 table = MM_AllocPhys();
502 tmp_map = (void*)MM_MapTemp(table);
504 for( i = 0; i < 1024; i ++ )
506 // Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]);
509 case 0: tmp_map[i] = 0; break;
512 Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (Table*256+i)*0x1000);
519 // Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230);
// 0x230 = AP bits (4-5) + APX (bit 9). 0x010 = AP_KRW_ONLY: kernel-writable
// pages cannot be COWed (no fault on kernel writes), so copy them eagerly.
520 if( (cur[i] & 0x230) == 0x010 )
524 newpage = MM_AllocPhys();
525 src = (void*)( (Table*256+i)*0x1000 );
526 dst = (void*)MM_MapTemp(newpage);
527 // Debug("Taking a copy of kernel page %p (%P)", src, cur[i] & ~0xFFF);
528 memcpy(dst, src, PAGE_SIZE);
529 MM_FreeTemp( (tVAddr)dst );
530 tmp_map[i] = newpage | (cur[i] & 0xFFF);
// 0x030 = AP_RW_BOTH: downgrade BOTH parent and child to read-only (APX set,
// giving AP_RO_BOTH = COW) so the next write faults and triggers the copy.
534 if( (cur[i] & 0x230) == 0x030 )
535 cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
// Shared (non-copied) pages gain a reference for the new address space.
537 MM_RefPhys( tmp_map[i] & ~0xFFF );
542 MM_FreeTemp( (tVAddr) tmp_map );
// Four coarse-table descriptors (type 01) at 1KiB intervals within the page.
544 DestEnt[0] = table + 0*0x400 + 1;
545 DestEnt[1] = table + 1*0x400 + 1;
546 DestEnt[2] = table + 2*0x400 + 1;
547 DestEnt[3] = table + 3*0x400 + 1;
// Clone the current user address space for fork(): copies/COWs every user
// coarse-table block, builds the new address space's fractal self-mapping,
// and duplicates the current kernel stack. Returns the physical address of
// the new root table.
550 tPAddr MM_Clone(void)
553 Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
557 // MM_DumpTables(0, KERNEL_BASE);
559 ret = MM_AllocateRootTable();
561 cur = (void*)MM_TABLE0USER;
// The new root table spans two pages; map both for editing.
562 new_lvl1_1 = (void*)MM_MapTemp(ret);
563 new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
564 tmp_map = new_lvl1_1;
// Walk all user level-0 entries except the final 4 (the kstack block).
565 for( i = 0; i < 0x800-4; i ++ )
567 // HACK! Ignore the original identity mapping
568 if( i == 0 && Threads_GetTID() == 0 ) {
// Entries >= 0x400 live in the second page of the root table.
573 tmp_map = &new_lvl1_2[-0x400];
576 case 0: tmp_map[i] = 0; break;
578 MM_int_CloneTable(&tmp_map[i], i);
579 i += 3; // Tables are allocated in blocks of 4
583 Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
589 // Allocate Fractal table
592 tPAddr tmp = MM_AllocPhys();
593 Uint32 *table = (void*)MM_MapTemp(tmp);
595 register Uint32 __SP asm("sp");
597 // Map table to last 4MiB of user space
598 new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
599 new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
600 new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
601 new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
// First 512 fractal slots map the new address space's own coarse tables.
603 tmp_map = new_lvl1_1;
604 for( j = 0; j < 512; j ++ )
607 tmp_map = &new_lvl1_2[-0x400];
608 if( (tmp_map[j*4] & 3) == 1 )
610 table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
611 table[j] |= 0x813; // nG, Kernel Only, Small page, XN
// Next two slots map the root table's own two pages.
617 table[j++] = (ret + 0x0000) | 0x813;
618 table[j++] = (ret + 0x1000) | 0x813;
// Clear the remainder up to the kernel-stack slots (loop body elided).
620 for( ; j < 1024; j ++ )
623 // Get kernel stack bottom
624 sp = __SP & ~(MM_KSTACK_SIZE-1);
625 j = (sp / 0x1000) % 1024;
626 num = MM_KSTACK_SIZE/0x1000;
628 Log("num = %i, sp = %p, j = %i", num, sp, j);
// Physically copy each page of the current kernel stack into the clone.
631 for(; num--; j ++, sp += 0x1000)
636 page = MM_AllocPhys();
637 Log("page = %P", page);
638 table[j] = page | 0x813;
640 tmp_page = (void*)MM_MapTemp(page);
641 memcpy(tmp_page, (void*)sp, 0x1000);
642 MM_FreeTemp( (tVAddr) tmp_page );
645 MM_FreeTemp( (tVAddr)table );
648 MM_FreeTemp( (tVAddr)new_lvl1_1 );
649 MM_FreeTemp( (tVAddr)new_lvl1_2 );
// Tear down the current user address space (e.g. on exec): dereference every
// mapped user page and coarse table, then free every kernel stack in the
// final table block except the one currently in use.
654 void MM_ClearUser(void)
657 const int user_table_count = USER_STACK_TOP / (256*0x1000);
658 Uint32 *cur = (void*)MM_TABLE0USER;
661 // MM_DumpTables(0, 0x80000000);
663 Log("user_table_count = %i (as opposed to %i)", user_table_count, 0x800-4);
665 for( i = 0; i < user_table_count; i ++ )
669 case 0: break; // Already unmapped
// View this coarse table through the fractal mapping.
671 tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
672 for( j = 0; j < 1024; j ++ )
676 case 0: break; // Unmapped
678 Log_Error("MMVirt", "TODO: Support large pages in MM_ClearUser");
682 MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) );
// Release the coarse table page itself.
686 MM_DerefPhys( cur[i] & ~(PAGE_SIZE-1) );
694 Log_Error("MMVirt", "TODO: Implement sections/supersections in MM_ClearUser");
700 // The final block of 4 tables is the kernel-stack region
703 // Clear out unused stacks
705 register Uint32 __SP asm("sp");
706 int cur_stack_base = ((__SP & ~(MM_KSTACK_SIZE-1)) / PAGE_SIZE) % 1024;
708 tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
710 // First 512 is the Table1 mapping + 2 for Table0 mapping
711 for( j = 512+2; j < 1024; j ++ )
713 // Skip current stack
714 if( j == cur_stack_base ) {
715 j += (MM_KSTACK_SIZE / PAGE_SIZE) - 1;
718 if( !(tab[j] & 3) ) continue;
719 ASSERT( (tab[j] & 3) == 2 );
// NOTE(review): probable bug — mask should be ~(PAGE_SIZE-1) as on the
// earlier deref lines; ~(PAGE_SIZE) only clears bit 12. Confirm and fix.
720 MM_DerefPhys( tab[j] & ~(PAGE_SIZE) );
726 MM_DumpTables(0, 0x80000000);
727 // Log_KernelPanic("MMVirt", "TODO: Implement MM_ClearUser");
// Map PAddr into the first free slot of the temporary-mapping window and
// return the virtual address; returns failure when every slot is in use
// (failure return elided in this listing).
730 tVAddr MM_MapTemp(tPAddr PAddr)
735 for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
// A zero return from GetPageInfo means the slot is already mapped; keep looking.
737 if( MM_int_GetPageInfo(ret, &pi) == 0 )
740 Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0));
741 MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp
746 Log_Warning("MMVirt", "MM_MapTemp: All slots taken");
// Release a temporary mapping created by MM_MapTemp.
// Rejects (with a warning) addresses outside the temporary-mapping window.
750 void MM_FreeTemp(tVAddr VAddr)
752 if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) {
753 Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr);
// Unmaps and balances the MM_RefPhys taken in MM_MapTemp.
757 MM_Deallocate(VAddr);
// Map NPages physically-contiguous hardware pages starting at PAddr into the
// hardware-mapping window; returns the virtual base, or failure when no run
// of NPages free slots exists (failure return elided in this listing).
760 tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages)
766 ENTER("xPAddr iNPages", PAddr, NPages);
768 // Scan for a location
769 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE )
771 // LOG("checking %p", ret);
772 // Check that there are `NPages` consecutive free pages here
773 for( i = 0; i < NPages; i ++ )
775 if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 )
778 // Nope — skip past the used page that was found and try again
779 // LOG("i = %i, ==? %i", i, NPages);
781 ret += i * PAGE_SIZE;
// Found a free run: map each page in turn.
786 for( i = 0; i < NPages; i ++ )
787 MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE);
792 Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages);
797 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr)
802 phys = MM_AllocPhysRange(Pages, MaxBits);
804 Log_Warning("MMVirt", "No space left for a %i page block (MM_AllocDMA)", Pages);
808 ret = MM_MapHWPages(phys, Pages);
// Unmap a hardware mapping created by MM_MapHWPages. Currently a stub.
814 void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)
816 Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages");
// Allocate a new kernel stack, either in the global-stacks region (bShared)
// or the per-process region. The first page of each slot is left unallocated
// as a guard page (allocation starts at ofs = PAGE_SIZE).
// NOTE(review): the forward declaration names this parameter bGlobal —
// harmonise the two names.
819 tVAddr MM_NewKStack(int bShared)
821 tVAddr min_addr, max_addr;
825 min_addr = MM_GLOBALSTACKS;
826 max_addr = MM_GLOBALSTACKS_END;
829 min_addr = MM_KSTACK_BASE;
830 max_addr = MM_KSTACK_END;
833 // Locate a free slot
// A slot is free when its top page is unmapped (GetPageInfo returns non-zero).
834 for( addr = min_addr; addr < max_addr; addr += MM_KSTACK_SIZE )
837 if( MM_int_GetPageInfo(addr+MM_KSTACK_SIZE-PAGE_SIZE, &pi) ) break;
840 // Check for an error
841 if(addr >= max_addr) {
846 for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE )
848 if( MM_Allocate(addr + ofs) == 0 )
// Allocation failed mid-way: roll back the pages allocated so far.
853 MM_Deallocate(addr + ofs);
855 Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate");
// Create the initial user stack just below USER_STACK_TOP. Only the top
// USER_STACK_COMM bytes are committed eagerly; the rest is backed by the
// shared zero page (COW). The first page is left as a guard.
862 tVAddr MM_NewUserStack(void)
866 addr = USER_STACK_TOP - USER_STACK_SIZE;
867 if( MM_GetPhysAddr(addr + PAGE_SIZE) ) {
868 Log_Error("MMVirt", "Unable to create initial user stack, addr %p taken",
875 for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE )
// Top USER_STACK_COMM bytes: real pages; below that: zero-page COW.
878 if(ofs >= USER_STACK_SIZE - USER_STACK_COMM)
879 rv = MM_Allocate(addr + ofs);
881 rv = MM_AllocateZero(addr + ofs);
// Allocation failed mid-way: roll back pages allocated so far.
887 MM_Deallocate(addr + ofs);
889 Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate");
// Make the page user-accessible (clear the kernel-only flag).
892 MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL);
894 // Log("Return %p", addr + ofs);
895 // MM_DumpTables(0, 0x80000000);
// Print one coalesced mapping range for MM_DumpTables. Zero-page-backed
// ranges print a string marker instead of a physical address.
899 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info)
901 if( giMM_ZeroPage && Info->PhysAddr == giMM_ZeroPage )
903 Debug("%p => %8s - 0x%7x %i %x",
905 Info->Domain, Info->AP
// Info->PhysAddr has been advanced past the range by the caller, so
// subtracting Len recovers the range's starting physical address.
910 Debug("%p => %8x - 0x%7x %i %x",
911 Start, Info->PhysAddr-Len, Len,
912 Info->Domain, Info->AP
// Debug dump of the page tables between Start and End, coalescing adjacent
// pages whose attributes and physical addresses are contiguous into single
// output lines.
917 void MM_DumpTables(tVAddr Start, tVAddr End)
919 tVAddr range_start = 0, addr;
920 tMM_PageInfo pi, pi_old;
921 int i = 0, inRange=0;
925 Debug("Page Table Dump:");
// `i == 0` forces at least one iteration; `addr &&` stops on wrap to 0.
927 for( addr = Start; i == 0 || (addr && addr < End); i = 1 )
930 // Log("addr = %p", addr);
931 rv = MM_int_GetPageInfo(addr, &pi);
// Start a new range whenever any attribute differs from the running one.
933 || pi.Size != pi_old.Size
934 || pi.Domain != pi_old.Domain
935 || pi.AP != pi_old.AP
936 || pi_old.PhysAddr != pi.PhysAddr )
939 MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old);
// Align down to the current mapping's natural size before advancing.
941 addr &= ~((1 << pi.Size)-1);
946 // Handle the zero page
// The zero page backs many addresses at one PhysAddr, so don't advance
// the expected physical address across zero-page entries.
947 if( !giMM_ZeroPage || pi_old.Size != 12 || pi_old.PhysAddr != giMM_ZeroPage )
948 pi_old.PhysAddr += 1 << pi_old.Size;
949 addr += 1 << pi_old.Size;
// Flush the final open range.
953 MM_int_DumpTableEnt(range_start, addr - range_start, &pi);
957 // NOTE: Runs in abort context — not much different, just a smaller stack
// Data/prefetch abort handler: resolves copy-on-write faults (AP_RO_BOTH
// pages); anything else falls through to the fatal error report at the end.
958 void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch)
963 rv = MM_int_GetPageInfo(Addr, &pi);
// A mapped page marked AP_RO_BOTH is a COW page — handle the write fault.
966 if( rv == 0 && pi.AP == AP_RO_BOTH )
969 if( MM_GetRefCount(pi.PhysAddr) > 1 )
971 // Duplicate the page
975 newpage = MM_AllocPhys();
977 Log_Error("MMVirt", "Unable to allocate new page for COW");
980 dst = (void*)MM_MapTemp(newpage);
981 src = (void*)(Addr & ~(PAGE_SIZE-1));
982 memcpy( dst, src, PAGE_SIZE );
983 MM_FreeTemp( (tVAddr)dst );
985 Log_Notice("MMVirt", "COW %p caused by %p, %P duped to %P", Addr, PC,
986 pi.PhysAddr, newpage);
// Swap the mapping over to the private copy.
988 MM_DerefPhys(pi.PhysAddr);
989 pi.PhysAddr = newpage;
// Last reference: no copy needed, just make the page writable again.
992 Log_Notice("MMVirt", "COW %p caused by %p, took last reference to %P", Addr, PC, pi.PhysAddr);
// Re-install the entry with updated permissions (AP change elided — confirm).
996 MM_int_SetPageInfo(Addr, &pi);
// Unhandled fault: report and (presumably, elided) panic/kill.
1001 Log_Error("MMVirt", "Code at %p accessed %p (DFSR = 0x%x)%s", PC, Addr, DFSR,
1002 (bPrefetch ? " - Prefetch" : "")