#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)])
#define USRFRACTAL(addr) (*((Uint32*)(0x7FDFF000) + ((addr)>>22)))
#define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0))
-#define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1" : : "r" (addr))
+// TLBIMVA: invalidate the unified TLB entry for the page containing addr
+// (rounded down to a 4KiB boundary). The added dsb/isb ensure the
+// invalidation has completed before subsequent accesses, and the "memory"
+// clobber stops the compiler reordering memory operations around it.
+#define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1;dsb;isb" : : "r" ((addr)&~0xFFF):"memory")
+// DCCMVAC: clean the data-cache line containing addr to the point of
+// coherency, so freshly written page-table entries are visible to the
+// hardware table walker.
+#define DCCMVAC(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((addr)&~0xFFF))
// === PROTOTYPES ===
void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1);
tVAddr MM_NewKStack(int bGlobal);
void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info);
//void MM_DumpTables(tVAddr Start, tVAddr End);
+void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch);
// === GLOBALS ===
tPAddr giMM_ZeroPage;
desc[3] = desc[0] + 0xC00;
if( VAddr < 0x80000000 ) {
-// Log("USRFRACTAL(%p) = %p", VAddr, &USRFRACTAL(VAddr));
USRFRACTAL(VAddr) = paddr | 0x13;
}
else {
-// Log("FRACTAL(%p) = %p", VAddr, &FRACTAL(table1, VAddr));
FRACTAL(table1, VAddr) = paddr | 0x13;
}
// TLBIALL
- TLBIALL();
+ TLBIALL();
+
+ memset( (void*)&table1[ (VAddr >> 12) & ~(1024-1) ], 0, 0x1000 );
LEAVE('i', 0);
return 0;
if( (*desc & 3) == 1 ) LEAVE_RET('i', 1);
if( pi->PhysAddr == 0 ) {
*desc = 0;
+ TLBIMVA( VAddr );
+ DCCMVAC( (tVAddr) desc );
+ #warning "HACK: TLBIALL"
+ TLBIALL();
LEAVE('i', 0);
return 0;
}
*desc = (pi->PhysAddr & 0xFFFFF000) | 2;
if(!pi->bExecutable) *desc |= 1; // XN
- if(!pi->bGlobal) *desc |= 1 << 11; // NG
+ if(!pi->bGlobal) *desc |= 1 << 11; // nG
if( pi->bShared) *desc |= 1 << 10; // S
*desc |= (pi->AP & 3) << 4; // AP
*desc |= ((pi->AP >> 2) & 1) << 9; // APX
- TLBIMVA(VAddr & 0xFFFFF000);
+ TLBIMVA( VAddr );
+ #warning "HACK: TLBIALL"
+ TLBIALL();
+ DCCMVAC( (tVAddr) desc );
LEAVE('i', 0);
return 0;
}
if( MM_int_GetPageInfo(VAddr, &pi) )
return ;
- curFlags = MM_GetPhysAddr(VAddr);
+ curFlags = MM_GetFlags(VAddr);
if( (curFlags & Mask) == Flags )
return ;
curFlags &= ~Mask;
MM_int_SetPageInfo(VAddr, &pi);
}
+/**
+ * \brief Check that an arbitrary byte range is backed by mapped pages
+ * \param Addr	Base virtual address of the buffer
+ * \param Size	Size of the buffer in bytes
+ * \return 1 if every page spanned by [Addr, Addr+Size) is mapped (and of
+ *         consistent privilege), 0 otherwise
+ *
+ * If the first page is user-accessible, the whole buffer must be - a user
+ * buffer that crosses into kernel-only pages is rejected.
+ */
+int MM_IsValidBuffer(tVAddr Addr, size_t Size)
+{
+	tMM_PageInfo	pi;
+	 int	bUser = 0;
+	
+	// Expand the range to cover whole pages
+	Size += Addr & (PAGE_SIZE-1);
+	Addr &= ~(PAGE_SIZE-1);
+	
+	// First page determines whether this is a user or kernel buffer
+	if( MM_int_GetPageInfo(Addr, &pi) )	return 0;
+	if(pi.AP != AP_KRW_ONLY && pi.AP != AP_KRO_ONLY)
+		bUser = 1;
+	
+	// Check the remaining pages. Advance before testing so a buffer ending
+	// exactly on a page boundary is not probed one page past its end (the
+	// previous `Size >= PAGE_SIZE` form checked one extra page), and so
+	// the unsigned Size can never underflow.
+	while( Size > PAGE_SIZE )
+	{
+		Addr += PAGE_SIZE;
+		Size -= PAGE_SIZE;
+		if( MM_int_GetPageInfo(Addr, &pi) )
+			return 0;
+		if(bUser && (pi.AP == AP_KRW_ONLY || pi.AP == AP_KRO_ONLY))
+			return 0;
+	}
+	
+	return 1;
+}
+
int MM_Map(tVAddr VAddr, tPAddr PAddr)
{
tMM_PageInfo pi = {0};
pi.AP = AP_KRW_ONLY; // Kernel Read/Write
pi.bExecutable = 1;
if( MM_int_SetPageInfo(VAddr, &pi) ) {
- MM_DerefPhys(pi.PhysAddr);
+// MM_DerefPhys(pi.PhysAddr);
return 0;
}
return pi.PhysAddr;
tMM_PageInfo pi;
if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
-
if( pi.PhysAddr == 0 ) return;
MM_DerefPhys(pi.PhysAddr);
case 3:
// Small page
// - If full RW
- Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230);
- if( (cur[i] & 0x230) == 0x030 )
- cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
- tmp_map[i] = cur[i];
+// Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230);
+ if( (cur[i] & 0x230) == 0x010 )
+ {
+ void *dst, *src;
+ tPAddr newpage;
+ newpage = MM_AllocPhys();
+ src = (void*)( (Table*256+i)*0x1000 );
+ dst = (void*)MM_MapTemp(newpage);
+// Debug("Taking a copy of kernel page %p (%P)", src, cur[i] & ~0xFFF);
+ memcpy(dst, src, PAGE_SIZE);
+ MM_FreeTemp( (tVAddr)dst );
+ tmp_map[i] = newpage | (cur[i] & 0xFFF);
+ }
+ else
+ {
+ if( (cur[i] & 0x230) == 0x030 )
+ cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
+ tmp_map[i] = cur[i];
+ MM_RefPhys( tmp_map[i] & ~0xFFF );
+ }
break;
}
}
j = (sp / 0x1000) % 1024;
num = MM_KSTACK_SIZE/0x1000;
- Log("num = %i, sp = %p, j = %i", num, sp, j);
+// Log("num = %i, sp = %p, j = %i", num, sp, j);
// Copy stack pages
for(; num--; j ++, sp += 0x1000)
void *tmp_page;
page = MM_AllocPhys();
- Log("page = %P", page);
+// Log("page = %P", page);
table[j] = page | 0x813;
tmp_page = (void*)MM_MapTemp(page);
memcpy(tmp_page, (void*)sp, 0x1000);
MM_FreeTemp( (tVAddr) tmp_page );
}
-
+
MM_FreeTemp( (tVAddr)table );
}
MM_FreeTemp( (tVAddr)new_lvl1_1 );
MM_FreeTemp( (tVAddr)new_lvl1_2 );
+// Log("MM_Clone: ret = %P", ret);
+
return ret;
}
void MM_ClearUser(void)
{
int i, j;
+ const int user_table_count = USER_STACK_TOP / (256*0x1000);
Uint32 *cur = (void*)MM_TABLE0USER;
Uint32 *tab;
// MM_DumpTables(0, 0x80000000);
- for( i = 0; i < 0x800-4; i ++ )
+// Log("user_table_count = %i (as opposed to %i)", user_table_count, 0x800-4);
+
+ for( i = 0; i < user_table_count; i ++ )
{
switch( cur[i] & 3 )
{
cur[i] = 0;
}
+ // Final block of 4 tables are KStack
+ i = 0x800 - 4;
+
// Clear out unused stacks
{
register Uint32 __SP asm("sp");
}
- MM_DumpTables(0, 0x80000000);
-// Log_KernelPanic("MMVirt", "TODO: Implement MM_ClearUser");
+// MM_DumpTables(0, 0x80000000);
}
tVAddr MM_MapTemp(tPAddr PAddr)
if( MM_int_GetPageInfo(ret, &pi) == 0 )
continue;
- Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0));
+// Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0));
MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp
MM_Map(ret, PAddr);
}
MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL);
}
- Log("Return %p", addr + ofs);
- MM_DumpTables(0, 0x80000000);
+// Log("Return %p", addr + ofs);
+// MM_DumpTables(0, 0x80000000);
return addr + ofs;
}
int i = 0, inRange=0;
pi_old.Size = 0;
+ pi_old.AP = 0;
+ pi_old.PhysAddr = 0;
- Debug("Page Table Dump:");
+ Debug("Page Table Dump (%p to %p):", Start, End);
range_start = Start;
for( addr = Start; i == 0 || (addr && addr < End); i = 1 )
{
Debug("Done");
}
+// NOTE: Runs in abort context, not much difference, just a smaller stack
+// Abort handler: transparently resolves copy-on-write faults; any other
+// fault is fatal - it is logged, the relevant page tables are dumped, and
+// the handler spins forever.
+//  PC        - address of the faulting instruction (diagnostics only)
+//  Addr      - virtual address that faulted
+//  DFSR      - Data Fault Status Register value (logged, not decoded here)
+//  bPrefetch - non-zero for a prefetch (instruction) abort
+void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch)
+{
+	 int	rv;
+	tMM_PageInfo	pi;
+	
+	rv = MM_int_GetPageInfo(Addr, &pi);
+	
+	// Check for COW - here a page read-only to both kernel and user marks
+	// a copy-on-write mapping (see the AP_RO_BOTH handling in MM_Clone)
+	if( rv == 0 && pi.AP == AP_RO_BOTH )
+	{
+		pi.AP = AP_RW_BOTH;
+		// Fault on the shared zero page: hand out a private page and
+		// zero it directly rather than copying the source
+		if( giMM_ZeroPage && pi.PhysAddr == giMM_ZeroPage )
+		{
+			tPAddr newpage;
+			newpage = MM_AllocPhys();
+			if( !newpage ) {
+				Log_Error("MMVirt", "Unable to allocate new page for COW of ZERO");
+				for(;;);
+			}
+			
+			#if TRACE_COW
+			Log_Notice("MMVirt", "COW %p caused by %p, ZERO duped to %P (RefCnt(%i)--)", Addr, PC,
+				newpage, MM_GetRefCount(pi.PhysAddr));
+			#endif
+
+			// Swap the mapping over before touching the page: once
+			// SetPageInfo returns, Addr is writable and backed by newpage
+			MM_DerefPhys(pi.PhysAddr);
+			pi.PhysAddr = newpage;
+			pi.AP = AP_RW_BOTH;
+			MM_int_SetPageInfo(Addr, &pi);
+			
+			// Zero through the new (now-writable) mapping
+			memset( (void*)(Addr & ~(PAGE_SIZE-1)), 0, PAGE_SIZE );
+
+			return ;
+		}
+		else if( MM_GetRefCount(pi.PhysAddr) > 1 )
+		{
+			// Duplicate the page - copy out via a temp mapping of the
+			// new frame while the old (read-only) mapping is still live
+			tPAddr newpage;
+			void *dst, *src;
+			
+			newpage = MM_AllocPhys();
+			if(!newpage) {
+				Log_Error("MMVirt", "Unable to allocate new page for COW");
+				for(;;);
+			}
+			dst = (void*)MM_MapTemp(newpage);
+			src = (void*)(Addr & ~(PAGE_SIZE-1));
+			memcpy( dst, src, PAGE_SIZE );
+			MM_FreeTemp( (tVAddr)dst );
+			
+			#if TRACE_COW
+			Log_Notice("MMVirt", "COW %p caused by %p, %P duped to %P (RefCnt(%i)--)", Addr, PC,
+				pi.PhysAddr, newpage, MM_GetRefCount(pi.PhysAddr));
+			#endif
+
+			MM_DerefPhys(pi.PhysAddr);
+			pi.PhysAddr = newpage;
+		}
+		#if TRACE_COW
+		else {
+			// Last reference - no copy needed, just make it writable
+			Log_Notice("MMVirt", "COW %p caused by %p, took last reference to %P",
+				Addr, PC, pi.PhysAddr);
+		}
+		#endif
+		// Unset COW
+		pi.AP = AP_RW_BOTH;
+		MM_int_SetPageInfo(Addr, &pi);
+		return ;
+	}
+	
+
+	// Not a COW fault: fatal. Dump the half of the address space that
+	// contains the faulting address, then hang.
+	Log_Error("MMVirt", "Code at %p accessed %p (DFSR = 0x%x)%s", PC, Addr, DFSR,
+		(bPrefetch ? " - Prefetch" : "")
+		);
+	if( Addr < 0x80000000 )
+		MM_DumpTables(0, 0x80000000);
+	else
+		MM_DumpTables(0x80000000, -1);
+	for(;;);
+}
+