X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=Kernel%2Farch%2Farmv7%2Fmm_virt.c;h=4a8a1fa353b4030c599c6b509095b35a64fa6a04;hb=e31829ecc2b8ae2338745f4ed393748704a81531;hp=0fc426d4842ca12b28cdb67a66c6423dfad1db5b;hpb=f9c581641afeb556188e84428febd4011e61edc2;p=tpg%2Facess2.git diff --git a/Kernel/arch/armv7/mm_virt.c b/Kernel/arch/armv7/mm_virt.c index 0fc426d4..4a8a1fa3 100644 --- a/Kernel/arch/armv7/mm_virt.c +++ b/Kernel/arch/armv7/mm_virt.c @@ -13,6 +13,7 @@ #define AP_KRO_ONLY 0x5 #define AP_RW_BOTH 0x3 #define AP_RO_BOTH 0x6 +#define PADDR_MASK_LVL1 0xFFFFFC00 // === IMPORTS === extern Uint32 kernel_table0[]; @@ -31,14 +32,21 @@ typedef struct //#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)]) #define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)]) +#define USRFRACTAL(table1, addr) ((table1)[ (0x7F8/4*1024) + ((addr)>>22)]) #define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0)) +#define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1" : : "r" (addr)) // === PROTOTYPES === void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1); int MM_int_AllocateCoarse(tVAddr VAddr, int Domain); int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); +tPAddr MM_AllocateRootTable(void); +void MM_int_CloneTable(Uint32 *DestEnt, int Table); +tPAddr MM_Clone(void); tVAddr MM_NewKStack(int bGlobal); +void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info); +//void MM_DumpTables(tVAddr Start, tVAddr End); // === GLOBALS === @@ -117,7 +125,7 @@ int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) Uint32 *table0, *table1; Uint32 *desc; - ENTER("pVADdr ppi", VAddr, pi); + ENTER("pVAddr ppi", VAddr, pi); MM_int_GetTables(VAddr, &table0, &table1); @@ -151,6 +159,7 @@ int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) if( pi->bShared) *desc |= 1 << 10; // S *desc |= (pi->AP & 3) << 4; // AP *desc |= ((pi->AP >> 2) & 1) << 9; // APX + TLBIMVA(VAddr & 0xFFFFF000); LEAVE('i', 0); return 0; } @@ -158,6 +167,7 @@ int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) { // Large page // TODO: + Log_Warning("MMVirt", "TODO: Implement large pages in MM_int_SetPageInfo"); } break; case 20: // Section or unmapped @@ -195,12 +205,12 @@ int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) return 1; } -extern tShortSpinlock glDebug_Lock; - int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) { Uint32 *table0, *table1; Uint32 desc; + +// LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi); MM_int_GetTables(VAddr, &table0, &table1); @@ -212,7 +222,7 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) pi->bExecutable = 1; pi->bGlobal = 0; pi->bShared = 0; - + pi->AP = 0; switch( (desc & 3) ) { @@ -240,15 +250,19 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) case 1: pi->Size = 16; pi->PhysAddr = desc & 0xFFFF0000; + pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2); + pi->bExecutable = !(desc & 0x8000); + pi->bShared = (desc >> 10) & 1; return 0; // 2/3: Small page case 2: case 3: pi->Size = 12; pi->PhysAddr = desc & 0xFFFFF000; - pi->bExecutable = desc & 1; + pi->bExecutable = !(desc & 1); pi->bGlobal = !(desc >> 11); pi->bShared = (desc >> 10) & 1; + pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2); return 0; } return 1; @@ -261,7 +275,8 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32; pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36; pi->Size = 24; - 
pi->Domain = 0;	// Superpages default to zero
+		pi->Domain = 0;	// Supersections default to zero
+		pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
 		return 0;
 	}
 
@@ -269,6 +284,7 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
 		pi->PhysAddr = desc & 0xFFF80000;
 		pi->Size = 20;
 		pi->Domain = (desc >> 5) & 7;
+		pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
 		return 0;
 
 	// 3: Reserved (invalid)
@@ -329,6 +345,8 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
 int MM_Map(tVAddr VAddr, tPAddr PAddr)
 {
 	tMM_PageInfo	pi = {0};
+//	Log("MM_Map %P=>%p", PAddr, VAddr);
+	
 	pi.PhysAddr = PAddr;
 	pi.Size = 12;
 	pi.AP = AP_KRW_ONLY;	// Kernel Read/Write
@@ -375,6 +393,175 @@ void MM_Deallocate(tVAddr VAddr)
 	MM_int_SetPageInfo(VAddr, &pi);
 }
 
+tPAddr MM_AllocateRootTable(void)
+{
+	tPAddr	ret;
+	
+	ret = MM_AllocPhysRange(2, -1);
+	if( ret & 0x1000 ) {
+		MM_DerefPhys(ret);
+		MM_DerefPhys(ret+0x1000);
+		ret = MM_AllocPhysRange(3, -1);
+		if( ret & 0x1000 ) {
+			MM_DerefPhys(ret);
+			ret += 0x1000;
+//			Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
+		}
+		else {
+			MM_DerefPhys(ret + 0x2000);
+//			Log("MM_AllocateRootTable: Second try aligned, %P", ret);
+		}
+	}
+//	else
+//		Log("MM_AllocateRootTable: Got it in one, %P", ret);
+	return ret;
+}
+
+void MM_int_CloneTable(Uint32 *DestEnt, int Table)
+{
+	tPAddr	table;
+	Uint32	*tmp_map;
+	Uint32	*cur = (void*)MM_TABLE0USER;
+//	Uint32	*cur = &FRACTAL(MM_TABLE1USER,0);
+	 int	i;
+	
+	table = MM_AllocPhys();
+	if(!table)	return ;
+	
+	tmp_map = (void*)MM_MapTemp(table);
+	
+	for( i = 0; i < 1024; i ++ )
+	{
+		switch(cur[Table*256+i] & 3)
+		{
+		case 0:	tmp_map[i] = 0;	break;
+		case 1:
+			tmp_map[i] = 0;
+			Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable");
+			// Large page?
+			break;
+		case 2:
+		case 3:
+			// Small page
+			// - If full RW
+			if( (cur[Table*256+i] & 0x230) == 0x030 )
+				cur[Table*256+i] |= 0x200;	// Set to full RO (Full RO=COW, User RO = RO)
+			tmp_map[i] = cur[Table*256+i];
+			break;
+		}
+	}
+	
+	DestEnt[0] = table + 0*0x400 + 1;
+	DestEnt[1] = table + 1*0x400 + 1;
+	DestEnt[2] = table + 2*0x400 + 1;
+	DestEnt[3] = table + 3*0x400 + 1;
+}
+
+tPAddr MM_Clone(void)
+{
+	tPAddr	ret;
+	Uint32	*new_lvl1_1, *new_lvl1_2, *cur;
+	Uint32	*tmp_map;
+	 int	i;
+	
+	ret = MM_AllocateRootTable();
+	
+	cur = (void*)MM_TABLE0USER;
+	new_lvl1_1 = (void*)MM_MapTemp(ret);
+	new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
+	tmp_map = new_lvl1_1;
+	new_lvl1_1[0] = 0x8202;	// Section mapping the first meg for exception vectors (K-RO)
+	for( i = 1; i < 0x800-4; i ++ )
+	{
+//		Log("i = %i", i);
+		if( i == 0x400 )
+			tmp_map = &new_lvl1_2[-0x400];
+		switch( cur[i] & 3 )
+		{
+		case 0:	tmp_map[i] = 0;	break;
+		case 1:
+			MM_int_CloneTable(&tmp_map[i], i);
+			i += 3;	// Tables are allocated in blocks of 4
+			break;
+		case 2:
+		case 3:
+			Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
+			tmp_map[i] = 0;
+			break;
+		}
+	}
+	
+	// Allocate Fractal table
+	{
+		 int	j, num;
+		tPAddr	tmp = MM_AllocPhys();
+		Uint32	*table = (void*)MM_MapTemp(tmp);
+		Uint32	sp;
+		register Uint32 __SP asm("sp");
+		Log("new_lvl1_2 = %p, &new_lvl1_2[0x3FC] = %p", new_lvl1_2, &new_lvl1_2[0x3FC]);
+		// Map table to last 4MiB of user space
+		new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
+		new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
+		new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
+		new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
+		
+		tmp_map = new_lvl1_1;
+		for( j = 0; j < 512; j ++ )
+		{
+			if( j == 256 )
+				tmp_map = &new_lvl1_2[-0x400];
+			if( (tmp_map[j*4] & 3) == 1 )
+			{
+				table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;	// 0xFFFFFC00
+				table[j] |= 0x813;	// nG, Kernel Only, Small page, XN
+			}
+			else
+				table[j] = 0;
+		}
+		// Fractal
+		table[j++] = (ret + 0x0000) | 0x813;
+		table[j++] = (ret + 0x1000) | 0x813;
+		Log("table[%i] = %x, table[%i] = %x", j-2, table[j-2], j-1, table[j-1]);
+		for( ; j < 1024; j ++ )
+			table[j] = 0;
+		
+		// Get kernel stack bottom
+		sp = __SP & ~(MM_KSTACK_SIZE-1);
+		j = (sp / 0x1000) % 1024;
+		num = MM_KSTACK_SIZE/0x1000;
+		Log("sp = %p, j = %i", sp, j);
+		
+		// Copy stack pages
+		for(; num--; j ++, sp += 0x1000)
+		{
+			tPAddr	page;
+			void	*tmp_page;
+			
+			page = MM_AllocPhys();
+			table[j] = page | 0x813;
+			
+			tmp_page = (void*)MM_MapTemp(page);
+			memcpy(tmp_page, (void*)sp, 0x1000);
+			MM_FreeTemp( (tVAddr) tmp_page );
+		}
+		
+//		Debug_HexDump("MMVirt - last table", table, 0x1000);
+		
+		MM_FreeTemp( (tVAddr)table );
+	}
+	
+//	Debug_HexDump("MMVirt - Return page 1", new_lvl1_1, 0x1000);
+//	Debug_HexDump("MMVirt - Return page 2", new_lvl1_2, 0x1000);
+	
+	MM_FreeTemp( (tVAddr)new_lvl1_1 );
+	MM_FreeTemp( (tVAddr)new_lvl1_2 );
+	
+//	Log("Table dump");
+//	MM_DumpTables(0, -1);
+	
+	return ret;
+}
+
 tPAddr MM_ClearUser(void)
 {
 	// TODO: Implement ClearUser
@@ -383,13 +570,81 @@ tPAddr MM_ClearUser(void)
 
 tVAddr MM_MapTemp(tPAddr PAddr)
 {
-	// TODO: Implement MapTemp
+	tVAddr	ret;
+	tMM_PageInfo	pi;
+	
+	for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
+	{
+		if( MM_int_GetPageInfo(ret, &pi) == 0 )
+			continue;
+	
+//		Log("MapTemp %P at %p", PAddr, ret);
+		MM_RefPhys(PAddr);	// Counter the MM_Deallocate in FreeTemp
+		MM_Map(ret, PAddr);
+		
+		return ret;
+	}
+	Log_Warning("MMVirt", "MM_MapTemp: All slots taken");
 	return 0;
 }
 
 void MM_FreeTemp(tVAddr VAddr)
 {
 	// TODO: Implement FreeTemp
+	if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) {
+		Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr);
+		return ;
+	}
+	
+	MM_Deallocate(VAddr);
+}
+
+tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages)
+{
+	tVAddr	ret;
+	 int	i;
+	tMM_PageInfo	pi;
+	
+	ENTER("xPAddr iNPages", PAddr, NPages);
+	
+	// Scan for a location
+	for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE )
+	{
+//		LOG("checking %p", ret);
+		// Check if there are `NPages` free pages
+		for( i = 0; i < NPages; i ++ )
+		{
+			if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 )
+				break;
+		}
+		// No; skip past the used page that was found and try again
+//		LOG("i = %i, ==? %i", i, NPages);
%i", i, NPages); + if( i != NPages ) { + ret += i * PAGE_SIZE; + continue ; + } + + // Map the pages + for( i = 0; i < NPages; i ++ ) + MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAddr); + // and return + LEAVE('p', ret); + return ret; + } + Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages); + LEAVE('p', 0); + return 0; +} + +tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr) +{ + Log_Error("MMVirt", "TODO: Implement MM_AllocDMA"); + return 0; +} + +void MM_UnmapHWPages(tVAddr Vaddr, Uint Number) +{ + Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages"); } tVAddr MM_NewKStack(int bShared) @@ -421,7 +676,8 @@ tVAddr MM_NewKStack(int bShared) // 1 guard page for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE ) { - if( MM_Allocate(addr + ofs) == 0 ) { + if( MM_Allocate(addr + ofs) == 0 ) + { while(ofs) { ofs -= PAGE_SIZE; @@ -434,8 +690,48 @@ tVAddr MM_NewKStack(int bShared) return addr + ofs; } +void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info) +{ + Log("%p => %8x - 0x%7x %i %x", + Start, Info->PhysAddr-Len, Len, + Info->Domain, + Info->AP + ); +} + void MM_DumpTables(tVAddr Start, tVAddr End) { + tVAddr range_start = 0, addr; + tMM_PageInfo pi, pi_old; + int i = 0, inRange=0; + pi_old.Size = 0; + + Log("Page Table Dump:"); + range_start = Start; + for( addr = Start; i == 0 || (addr && addr < End); i = 1 ) + { +// Log("addr = %p", addr); + int rv = MM_int_GetPageInfo(addr, &pi); + if( rv + || pi.Size != pi_old.Size + || pi.Domain != pi_old.Domain + || pi.AP != pi_old.AP + || pi_old.PhysAddr != pi.PhysAddr ) + { + if(inRange) { + MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old); + } + addr &= ~((1 << pi.Size)-1); + range_start = addr; + } + + pi_old = pi; + pi_old.PhysAddr += 1 << pi_old.Size; + addr += 1 << pi_old.Size; + inRange = (rv == 0); + } + if(inRange) + MM_int_DumpTableEnt(range_start, addr - range_start, &pi); }