4 * ARM7 Virtual Memory Manager
5 * - arch/arm7/mm_virt.c
// Access-permission encodings (AP[2:0]) used in tMM_PageInfo.AP and the
// short-descriptor AP/APX fields: K = kernel-only, BOTH = kernel+user.
12 #define AP_KRW_ONLY 0x1
13 #define AP_KRO_ONLY 0x5
14 #define AP_RW_BOTH 0x3
15 #define AP_RO_BOTH 0x6
// Mask extracting the coarse-table physical base (1KiB aligned) from a
// level-1 "coarse page table" descriptor.
16 #define PADDR_MASK_LVL1 0xFFFFFC00
// Kernel first-level translation table, defined in the arch startup code.
19 extern Uint32 kernel_table0[];
// Fractal self-mapping helpers: locate the level-1 entry that maps the
// page tables themselves.  The live versions shift by 22 (one entry per
// 4MiB block of coarse tables); the commented-out >>20 form is the old
// per-MiB variant kept for reference.
33 //#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)])
34 #define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)])
35 #define USRFRACTAL(table1, addr) ((table1)[ (0x7F8/4*1024) + ((addr)>>22)])
// Invalidate the entire unified TLB (CP15 c8, c7, 0 = TLBIALL).
36 #define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0))
// === Internal prototypes ===
39 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1);	// Select kernel/user table pair for an address
40 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain);	// Allocate a 4MiB-aligned block of four coarse tables
41 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);	// Write a mapping described by *pi
42 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);	// Decode the mapping of VAddr into *pi
43 tPAddr MM_AllocateRootTable(void);	// Allocate a 16KiB-aligned level-0 table
44 void MM_int_CloneTable(Uint32 *DestEnt, int Table);	// Clone one coarse-table block for fork/Clone
45 tPAddr MM_Clone(void);	// Clone the current address space
46 tVAddr MM_NewKStack(int bGlobal);	// Allocate a kernel stack (global or per-process)
47 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info);	// Debug dump of one contiguous range
48 //void MM_DumpTables(tVAddr Start, tVAddr End);
// One-time initialisation of the virtual memory subsystem.
// (Body not visible in this view.)
53 int MM_InitialiseVirtual(void)
// Select the level-0 (section/coarse entries) and level-1 (page entries,
// via the fractal mapping) tables that cover VAddr.
58 void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1)
// Top bit set => kernel half of the address space
60 if(VAddr & 0x80000000) {
61 *Table0 = (void*)&kernel_table0; // Level 0
62 *Table1 = (void*)MM_TABLE1KERN; // Level 1
// Otherwise the per-process user tables (mapped at fixed addresses)
65 *Table0 = (void*)MM_TABLE0USER;
66 *Table1 = (void*)MM_TABLE1USER;
// Allocate one physical page holding four consecutive 1KiB coarse tables,
// covering a 4MiB-aligned block of virtual space, and install the four
// level-0 descriptors for it.  Returns non-zero on failure/conflict.
70 int MM_int_AllocateCoarse(tVAddr VAddr, int Domain)
72 Uint32 *table0, *table1;
76 ENTER("xVAddr iDomain", VAddr, Domain);
78 MM_int_GetTables(VAddr, &table0, &table1);
// Round down to the 4MiB block this page of coarse tables will serve
80 VAddr &= ~(0x400000-1); // 4MiB per "block", 1 Page
// Level-0 entry for the first MiB of the block (one entry per MiB)
82 desc = &table0[ VAddr>>20];
83 LOG("desc = %p", desc);
85 // table0: 4 bytes = 1 MiB
87 LOG("desc[0] = %x", desc[0]);
88 LOG("desc[1] = %x", desc[1]);
89 LOG("desc[2] = %x", desc[2]);
90 LOG("desc[3] = %x", desc[3]);
// Fail if any of the four 1MiB slots is already mapped (type bits != 0)
92 if( (desc[0] & 3) != 0 || (desc[1] & 3) != 0
93 || (desc[2] & 3) != 0 || (desc[3] & 3) != 0 )
// One physical page = four 1KiB coarse tables
100 paddr = MM_AllocPhys();
// Type 1 = coarse page table; domain lives in bits [8:5]
108 *desc = paddr | (Domain << 5) | 1;
// The following three entries reuse desc[0]'s flag bits: paddr is
// page-aligned, so adding 0x400/0x800/0xC00 only advances the base.
109 desc[1] = desc[0] + 0x400;
110 desc[2] = desc[0] + 0x800;
111 desc[3] = desc[0] + 0xC00;
// Map the new coarse-table page into the fractal region (type 3 here —
// presumably small-page attributes for the self-map; confirm intent)
113 FRACTAL(table1, VAddr) = paddr | 3;
// Install (or replace) the mapping for VAddr according to *pi
// (pi->Size selects small page / large page / section / supersection).
// Returns 0 on success, non-zero on error.
122 int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
124 Uint32 *table0, *table1;
127 ENTER("pVADdr ppi", VAddr, pi);
129 MM_int_GetTables(VAddr, &table0, &table1);
// Level-0 entry (1MiB granularity)
131 desc = &table0[ VAddr >> 20 ];
132 LOG("desc = %p", desc);
// pi->Size is log2 of the mapping size
136 case 12: // Small Page
137 case 16: // Large Page
// No coarse table yet for this MiB => allocate the 4MiB block
139 if( (*desc & 3) == 0 ) {
140 MM_int_AllocateCoarse( VAddr, pi->Domain );
// Level-1 entry (4KiB granularity, via the fractal mapping)
142 desc = &table1[ VAddr >> 12 ];
143 LOG("desc (2) = %p", desc);
147 // - Error if overwriting a large page
148 if( (*desc & 3) == 1 ) LEAVE_RET('i', 1);
// PhysAddr == 0 means "unmap"
149 if( pi->PhysAddr == 0 ) {
// Build the small-page descriptor (type bits 0b10)
155 *desc = (pi->PhysAddr & 0xFFFFF000) | 2;
156 if(!pi->bExecutable) *desc |= 1; // XN
157 if(!pi->bGlobal) *desc |= 1 << 11; // NG
158 if( pi->bShared) *desc |= 1 << 10; // S
159 *desc |= (pi->AP & 3) << 4; // AP
160 *desc |= ((pi->AP >> 2) & 1) << 9; // APX
170 case 20: // Section or unmapped
171 Warning("TODO: Implement sections");
173 case 24: // Supersection
174 // Error if not aligned
175 if( VAddr & 0xFFFFFF ) {
// Only proceed over an empty slot or an existing supersection
// (type 2 with bit 18 set)
179 if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) )
181 if( pi->PhysAddr == 0 ) {
183 // TODO: Apply to all entries
// 16MiB-aligned physical base; extended-address bits left disabled
188 *desc = pi->PhysAddr & 0xFF000000;
189 // *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20;
190 // *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5;
// Type 2 (section) with bit 18 set => supersection
191 *desc |= 2 | (1 << 18);
192 // TODO: Apply to all entries
// Decode the translation-table entries covering VAddr into *pi.
// Returns 0 if mapped, non-zero if not.
205 int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi)
207 Uint32 *table0, *table1;
210 // LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi);
212 MM_int_GetTables(VAddr, &table0, &table1);
// Level-0 descriptor (1MiB granularity)
214 desc = table0[ VAddr >> 20 ];
216 // if( VAddr > 0x90000000)
217 // LOG("table0 desc(%p) = %x", &table0[ VAddr >> 20 ], desc);
233 // 1: Coarse page table
235 // Domain from top level table
236 pi->Domain = (desc >> 5) & 7;
// Descend to the level-1 descriptor (4KiB granularity)
238 desc = table1[ VAddr >> 12 ];
239 // LOG("table1 desc(%p) = %x", &table1[ VAddr >> 12 ], desc);
246 // 1: Large Page (64KiB)
249 pi->PhysAddr = desc & 0xFFFF0000;
250 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
// Large-page XN is bit 15
251 pi->bExecutable = !(desc & 0x8000);
252 pi->bShared = (desc >> 10) & 1;
253 // LogF("Large page, VAddr = %p, table1[VAddr>>12] = %p, desc = %x\n", VAddr, &table1[ VAddr >> 12 ], desc);
254 // LogF("Par desc = %p %x\n", &table0[ VAddr >> 20 ], table0[ VAddr >> 20 ]);
// Small page (4KiB): XN is bit 0
260 pi->PhysAddr = desc & 0xFFFFF000;
261 pi->bExecutable = !(desc & 1);
// BUG(review): this tests ALL bits >= 11, not just the nG bit;
// should likely be !((desc >> 11) & 1) — confirm and fix.
262 pi->bGlobal = !(desc >> 11);
263 pi->bShared = (desc >> 10) & 1;
264 pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2);
269 // 2: Section (or Supersection)
271 if( desc & (1 << 18) ) {
// Supersection: 16MiB base plus extended (LPAE-style) address bits
273 pi->PhysAddr = desc & 0xFF000000;
274 pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32;
// NOTE(review): bits [8:5] hold 4 bits of PA[39:36]; the 0x7 mask
// drops the top bit — presumably should be 0xF; confirm against the ARM ARM.
275 pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36;
277 pi->Domain = 0; // Supersections default to zero
278 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
// Plain section (1MiB)
// NOTE(review): section base is bits [31:20]; mask 0xFFF80000 also
// captures bit 19 (NS) into PhysAddr — presumably should be 0xFFF00000.
283 pi->PhysAddr = desc & 0xFFF80000;
285 pi->Domain = (desc >> 5) & 7;
286 pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2);
289 // 3: Reserved (invalid)
// Translate a virtual address to its physical address (0-equivalent
// failure path not visible here); offset within the mapping is preserved.
300 tPAddr MM_GetPhysAddr(tVAddr VAddr)
303 if( MM_int_GetPageInfo(VAddr, &pi) )
// Combine the mapping base with the offset inside the (1 << Size) region
305 return pi.PhysAddr | (VAddr & ((1 << pi.Size)-1));
// Convert the hardware page attributes of VAddr into MM_PFLAG_* flags.
308 Uint MM_GetFlags(tVAddr VAddr)
313 if( MM_int_GetPageInfo(VAddr, &pi) )
// Kernel-only mapping
321 ret |= MM_PFLAG_KERNEL;
// Kernel read-only mapping
324 ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO;
333 if( pi.bExecutable ) ret |= MM_PFLAG_EXEC;
// Update the MM_PFLAG_* attributes of an existing mapping
// (full body not visible here).
337 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
// Nothing to do if the address is not mapped
340 if( MM_int_GetPageInfo(VAddr, &pi) )
// Map one physical page at VAddr with kernel read/write permissions.
344 int MM_Map(tVAddr VAddr, tPAddr PAddr)
346 tMM_PageInfo pi = {0};
349 pi.AP = AP_KRW_ONLY; // Kernel Read/Write
// On failure, drop the reference taken on the physical page
351 if( MM_int_SetPageInfo(VAddr, &pi) ) {
352 MM_DerefPhys(pi.PhysAddr);
// Allocate a fresh physical page and map it at VAddr (kernel R/W).
// Returns the physical address, or 0 on failure.
358 tPAddr MM_Allocate(tVAddr VAddr)
360 tMM_PageInfo pi = {0};
362 ENTER("pVAddr", VAddr);
364 pi.PhysAddr = MM_AllocPhys();
// Out of physical memory
365 if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0);
367 pi.AP = AP_KRW_ONLY; // Kernel Read/Write
// Mapping failed: release the page again
369 if( MM_int_SetPageInfo(VAddr, &pi) ) {
370 MM_DerefPhys(pi.PhysAddr);
374 LEAVE('x', pi.PhysAddr);
// Unmap VAddr and drop the reference on its backing physical page.
378 void MM_Deallocate(tVAddr VAddr)
// Not mapped: nothing to do
382 if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
384 if( pi.PhysAddr == 0 ) return;
385 MM_DerefPhys(pi.PhysAddr);
// Re-install with PhysAddr == 0, i.e. clear the mapping
390 MM_int_SetPageInfo(VAddr, &pi);
// Allocate a 16KiB-aligned level-0 translation table.  First tries a
// 2-page range; the retry/adjustment logic (3-page over-allocation and
// freeing the unused ends to get alignment) is partially elided here.
393 tPAddr MM_AllocateRootTable(void)
397 ret = MM_AllocPhysRange(2, -1);
// Release the page that falls outside the aligned table
400 MM_DerefPhys(ret+0x1000);
// Fallback: allocate 3 pages so an aligned 2-page window must exist
401 ret = MM_AllocPhysRange(3, -1);
407 MM_DerefPhys(ret + 0x2000);
// Clone one 4MiB block of coarse tables (block index `Table`) from the
// current user address space into a new page, writing the four level-0
// descriptors into DestEnt[0..3].  Writable user pages are downgraded
// to read-only for copy-on-write.
413 void MM_int_CloneTable(Uint32 *DestEnt, int Table)
// Source level-1 entries of the current process (via fixed mapping)
417 Uint32 *cur = (void*)MM_TABLE0USER;
418 // Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
// Destination page holding the four new coarse tables
421 table = MM_AllocPhys();
424 tmp_map = (void*)MM_MapTemp(table);
// 1024 level-1 entries cover the whole 4MiB block
426 for( i = 0; i < 1024; i ++ )
430 case 0: tmp_map[i] = 0; break;
433 Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable");
// If user-writable (AP/APX pattern 0x030), mark full-RO => COW
// BUG(review): indexes cur[Table*256] (entry 0 only) in the test but
// cur[Table*256+i] in the update — the test presumably needs "+i".
440 if( (cur[Table*256] & 0x230) == 0x030 )
441 cur[Table*256+i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
442 tmp_map[i] = cur[Table*256+i];
// Install the four coarse-table descriptors (type 1) for the new block
447 DestEnt[0] = table + 0*0x400 + 1;
448 DestEnt[1] = table + 1*0x400 + 1;
449 DestEnt[2] = table + 2*0x400 + 1;
450 DestEnt[3] = table + 3*0x400 + 1;
// Clone the current address space for process creation: copy/COW the
// user half, rebuild the fractal self-mapping, and deep-copy the
// current kernel stack.  Returns the physical address of the new
// level-0 table.
453 tPAddr MM_Clone(void)
456 Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
460 ret = MM_AllocateRootTable();
462 cur = (void*)MM_TABLE0USER;
// The 16KiB root table is mapped as two temporary pages
463 new_lvl1_1 = (void*)MM_MapTemp(ret);
464 new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
465 tmp_map = new_lvl1_1;
466 new_lvl1_1[0] = 0x8202; // Section mapping the first meg for exception vectors (K-RO)
// Walk the user half minus the 4-entry fractal slot at the top
467 for( i = 1; i < 0x800-4; i ++ )
// Crossed into the second physical page of the root table; rebase the
// pointer so tmp_map[i] stays valid
471 tmp_map = &new_lvl1_2[-0x400];
472 Log("tmp_map = %p", tmp_map);
476 case 0: tmp_map[i] = 0; break;
// Coarse table: clone the whole 4MiB block at once
478 MM_int_CloneTable(&tmp_map[i], i);
479 i += 3; // Tables are alocated in blocks of 4
483 Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
489 // Allocate Fractal table
492 tPAddr tmp = MM_AllocPhys();
493 Uint32 *table = (void*)MM_MapTemp(tmp);
// Used below to find the current kernel stack
495 register Uint32 __SP asm("sp");
496 // Map table to last 4MiB of user space
497 tmp_map[i+0] = tmp + 0*0x400 + 1;
498 tmp_map[i+1] = tmp + 1*0x400 + 1;
499 tmp_map[i+2] = tmp + 2*0x400 + 1;
500 tmp_map[i+3] = tmp + 3*0x400 + 1;
// Build the self-map: one small-page entry per coarse table, taking
// the table base from every 4th level-0 descriptor
501 for( j = 0; j < 256; j ++ ) {
502 table[j] = new_lvl1_1[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
503 table[j] |= 0x10|3; // Kernel Only, Small table, XN
505 for( ; j < 512; j ++ ) {
506 table[j] = new_lvl1_2[(j-256)*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
507 table[j] |= 0x10|3; // Kernel Only, Small table, XN
509 for( ; j < 1024; j ++ )
512 // Get kernel stack bottom
514 sp &= ~(MM_KSTACK_SIZE-1);
515 j = (sp / 0x1000) % 1024;
516 num = MM_KSTACK_SIZE/0x1000;
517 Log("sp = %p, j = %i", sp, j);
// Deep-copy each page of the active kernel stack into the clone
520 for(; num--; j ++, sp += 0x1000)
// NOTE(review): MM_AllocPhys returns a physical address — the type
// should presumably be tPAddr, not tVAddr.
522 tVAddr page = MM_AllocPhys();
// 0x13 = small page, kernel R/W (AP=01), XN
524 table[j] = page | 0x13;
525 tmp_page = (void*)MM_MapTemp(page);
526 memcpy(tmp_page, (void*)sp, 0x1000);
527 MM_FreeTemp( (tVAddr)tmp_page );
530 MM_FreeTemp( (tVAddr)table );
// Release the second temporary mapping of the root table
533 tmp_map = &tmp_map[0x400];
534 MM_FreeTemp( (tVAddr)tmp_map );
537 MM_DumpTables(0, -1);
// Tear down the user half of the current address space (exec path).
542 tPAddr MM_ClearUser(void)
544 // TODO: Implement ClearUser
// Map PAddr into a free slot of the temporary-mapping window and return
// the chosen virtual address (0/failure path not fully visible).
548 tVAddr MM_MapTemp(tPAddr PAddr)
// Linear scan of the temp window for an unmapped page
553 for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
// GetPageInfo == 0 means the slot is already in use
555 if( MM_int_GetPageInfo(ret, &pi) == 0 )
562 Log_Warning("MMVirt", "MM_MapTemp: All slots taken");
// Release a temporary mapping previously returned by MM_MapTemp.
566 void MM_FreeTemp(tVAddr VAddr)
568 // TODO: Implement FreeTemp
// Guard against addresses outside the temp-mapping window
569 if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) {
570 Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr);
// NOTE(review): MM_Deallocate also derefs the physical page — callers
// relying on keeping the page must hold their own reference.
574 MM_Deallocate(VAddr);
// Map NPages contiguous physical pages starting at PAddr into the
// hardware-mapping window; returns the virtual base, or logs and fails
// if no contiguous free run exists.
577 tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages)
583 ENTER("xPAddr iNPages", PAddr, NPages);
585 // Scan for a location
586 for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE )
588 // LOG("checking %p", ret);
589 // Check if there is `NPages` free pages
590 for( i = 0; i < NPages; i ++ )
// Slot i is mapped => run is broken
592 if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 )
595 // Nope, jump to after the used page found and try again
596 // LOG("i = %i, ==? %i", i, NPages);
598 ret += i * PAGE_SIZE;
// Found a free run: map each page
603 for( i = 0; i < NPages; i ++ )
// BUG(review): the physical stride is "i*PAddr" — this maps the wrong
// pages for NPages > 1.  It should almost certainly be
//   MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE);
604 MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAddr);
609 Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages);
// Allocate Pages of DMA-capable memory below 2^MaxBits (unimplemented).
614 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr)
616 Log_Error("MMVirt", "TODO: Implement MM_AllocDMA");
// Release a mapping made by MM_MapHWPages (unimplemented).
620 void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)
622 Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages");
// Allocate and map a new kernel stack; returns the address of its TOP.
// NOTE(review): parameter is named `bShared` here but `bGlobal` in the
// prototype — harmonise the naming.
625 tVAddr MM_NewKStack(int bShared)
627 tVAddr min_addr, max_addr;
// Shared/global stacks live in a globally-mapped region…
631 min_addr = MM_GLOBALSTACKS;
632 max_addr = MM_GLOBALSTACKS_END;
// …otherwise use the per-process kernel-stack region
635 min_addr = MM_KSTACK_BASE;
636 max_addr = MM_KSTACK_END;
639 // Locate a free slot
// A slot is free when its last page is unmapped (GetPageInfo != 0)
640 for( addr = min_addr; addr < max_addr; addr += MM_KSTACK_SIZE )
643 if( MM_int_GetPageInfo(addr+MM_KSTACK_SIZE-PAGE_SIZE, &pi) ) break;
646 // Check for an error
647 if(addr >= max_addr) {
// Populate the stack, leaving the first page unmapped as a guard page
652 for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE )
654 if( MM_Allocate(addr + ofs) == 0 )
// Allocation failed part-way: unwind what was mapped
659 MM_Deallocate(addr + ofs);
661 Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate");
// Debug print of one contiguous mapped range.  The caller advances
// Info->PhysAddr past the range, so PhysAddr-Len is the range's start.
668 void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info)
670 Log("%p => %8x - 0x%7x %i %x",
671 Start, Info->PhysAddr-Len, Len,
// Walk [Start, End) and log contiguous ranges of identical mappings.
677 void MM_DumpTables(tVAddr Start, tVAddr End)
679 tVAddr range_start = 0, addr;
680 tMM_PageInfo pi, pi_old;
// i==0 forces at least one iteration; `addr && addr < End` also stops
// on wrap-around to 0
681 int i = 0, inRange=0;
686 for( addr = Start; i == 0 || (addr && addr < End); i = 1 )
688 int rv = MM_int_GetPageInfo(addr, &pi);
// Any attribute change ends the current contiguous range
690 || pi.Size != pi_old.Size
691 || pi.Domain != pi_old.Domain
692 || pi.AP != pi_old.AP
693 || pi_old.PhysAddr != pi.PhysAddr )
696 MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old);
// Align to the mapping's natural size before advancing
698 addr &= ~((1 << pi.Size)-1);
// Track the expected next physical address for contiguity checks
703 pi_old.PhysAddr += 1 << pi_old.Size;
704 addr += 1 << pi_old.Size;
// Flush the final open range
708 MM_int_DumpTableEnt(range_start, addr - range_start, &pi);