+ tPAddr ret;
+
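+	// The user half of the address space needs an 8KiB, 8KiB-aligned
+	// first-level table (assuming the 2GiB/2GiB TTBCR split this code
+	// targets), but MM_AllocPhysRange only guarantees contiguity.
+	// Try two pages first; if the range is misaligned, retry with three
+	// and keep whichever aligned pair results.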
+ ret = MM_AllocPhysRange(2, -1);
+ if( ret & 0x1000 ) {
+ MM_DerefPhys(ret);
+ MM_DerefPhys(ret+0x1000);
+ ret = MM_AllocPhysRange(3, -1);
+ if( ret & 0x1000 ) {
+ MM_DerefPhys(ret);
+ ret += 0x1000;
+// Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
+ }
+ else {
+ MM_DerefPhys(ret + 0x2000);
+// Log("MM_AllocateRootTable: Second try aligned, %P", ret);
+ }
+ }
+// else
+// Log("MM_AllocateRootTable: Got it in one, %P", ret);
+ return ret;
+}
+
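+/**
+ * \brief Clone a 4MiB block of second-level tables (four 1KiB tables
+ *        sharing one physical page)
+ * \param DestEnt	First of the four first-level entries to fill
+ * \param Table	First-level index of the block's first 1MiB section
+ */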
+void MM_int_CloneTable(Uint32 *DestEnt, int Table)
+{
+ tPAddr table;
+ Uint32 *tmp_map;
+ Uint32 *cur = (void*)MM_TABLE1USER;
+// Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
+ int i;
+
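+	// One physical page holds four 1KiB coarse tables (1024 entries,
+	// 4MiB of address space), so all four are cloned in a single pass.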
+ table = MM_AllocPhys();
+ if(!table) return ;
+
+ cur += 256*Table;
+
+ tmp_map = (void*)MM_MapTemp(table);
+
+ for( i = 0; i < 1024; i ++ )
+ {
+// Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]);
+ switch(cur[i] & 3)
+ {
+ case 0: tmp_map[i] = 0; break;
+ case 1:
+ tmp_map[i] = 0;
+			Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (void*)((Table*256+i)*0x1000));
+ // Large page?
+ break;
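+		// Small pages: kernel-only RW pages are copied eagerly, while
+		// user-writable pages are downgraded to read-only in both spaces
+		// so a later write fault can copy them on demand (full RO doubles
+		// as the COW marker, per the comment below).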
+ case 2:
+ case 3:
+ // Small page
+ // - If full RW
+// Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230);
+ if( (cur[i] & 0x230) == 0x010 )
+ {
+ void *dst, *src;
+ tPAddr newpage;
+ newpage = MM_AllocPhys();
+ src = (void*)( (Table*256+i)*0x1000 );
+ dst = (void*)MM_MapTemp(newpage);
+// Debug("Taking a copy of kernel page %p (%P)", src, cur[i] & ~0xFFF);
+ memcpy(dst, src, PAGE_SIZE);
+ MM_FreeTemp( (tVAddr)dst );
+ tmp_map[i] = newpage | (cur[i] & 0xFFF);
+ }
+ else
+ {
+ if( (cur[i] & 0x230) == 0x030 )
+ cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
+ tmp_map[i] = cur[i];
+ MM_RefPhys( tmp_map[i] & ~0xFFF );
+ }
+ break;
+ }
+ }
+ MM_FreeTemp( (tVAddr) tmp_map );
+
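+	// Point the four first-level entries at the four 1KiB sub-tables;
+	// the +1 marks each as a coarse page table descriptor.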
+ DestEnt[0] = table + 0*0x400 + 1;
+ DestEnt[1] = table + 1*0x400 + 1;
+ DestEnt[2] = table + 2*0x400 + 1;
+ DestEnt[3] = table + 3*0x400 + 1;
+}
+
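+/**
+ * \brief Clone the current user address space
+ * \return Physical address of the new two-page first-level table
+ *
+ * Writable user pages are marked copy-on-write; the active kernel
+ * stack is copied eagerly.
+ */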
+tPAddr MM_Clone(void)
+{
+ tPAddr ret;
+ Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
+ Uint32 *tmp_map;
+ int i;
+
+// MM_DumpTables(0, KERNEL_BASE);
+
+ ret = MM_AllocateRootTable();
+
+ cur = (void*)MM_TABLE0USER;
+ new_lvl1_1 = (void*)MM_MapTemp(ret);
+ new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
+ tmp_map = new_lvl1_1;
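+	// Walk the 0x800 first-level entries for the user half, skipping
+	// the last four, which are rebuilt below as the fractal mapping.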
+ for( i = 0; i < 0x800-4; i ++ )
+ {
+ // HACK! Ignore the original identity mapping
+ if( i == 0 && Threads_GetTID() == 0 ) {
+ tmp_map[0] = 0;
+ continue;
+ }
+ if( i == 0x400 )
+ tmp_map = &new_lvl1_2[-0x400];
+ switch( cur[i] & 3 )
+ {
+ case 0: tmp_map[i] = 0; break;
+ case 1:
+ MM_int_CloneTable(&tmp_map[i], i);
+			i += 3;	// Tables are allocated in blocks of 4
+ break;
+ case 2:
+ case 3:
+ Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
+ tmp_map[i] = 0;
+ break;
+ }
+ }
+
+ // Allocate Fractal table
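+	// (The fractal table maps the new space's own page tables into its
+	// last 4MiB, so they can be modified once the space is active.)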
+ {
+ int j, num;
+ tPAddr tmp = MM_AllocPhys();
+ Uint32 *table = (void*)MM_MapTemp(tmp);
+ Uint32 sp;
+		register Uint32 __SP asm("sp");	// Reads the current stack pointer (GCC register variable)
+
+ // Map table to last 4MiB of user space
+ new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
+ new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
+ new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
+ new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
+
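+		// Each group of four first-level entries shares one physical
+		// page of coarse tables (see MM_int_CloneTable), so 512 fractal
+		// entries cover all 0x800 user first-level entries.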
+ tmp_map = new_lvl1_1;
+ for( j = 0; j < 512; j ++ )
+ {
+ if( j == 256 )
+ tmp_map = &new_lvl1_2[-0x400];
+ if( (tmp_map[j*4] & 3) == 1 )
+ {
+				table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;	// 0xFFFFFC00
+ table[j] |= 0x813; // nG, Kernel Only, Small page, XN
+ }
+ else
+ table[j] = 0;
+ }
+ // Fractal
+ table[j++] = (ret + 0x0000) | 0x813;
+ table[j++] = (ret + 0x1000) | 0x813;
+ // Nuke the rest
+ for( ; j < 1024; j ++ )
+ table[j] = 0;
+
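+		// Give the clone a private copy of the active kernel stack so
+		// it resumes with the same contents at the same address.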
+ // Get kernel stack bottom
+ sp = __SP & ~(MM_KSTACK_SIZE-1);
+ j = (sp / 0x1000) % 1024;
+ num = MM_KSTACK_SIZE/0x1000;
+
+// Log("num = %i, sp = %p, j = %i", num, sp, j);
+
+ // Copy stack pages
+ for(; num--; j ++, sp += 0x1000)
+ {
+			tPAddr page;	// Physical address (MM_AllocPhys returns tPAddr)
+ void *tmp_page;
+
+ page = MM_AllocPhys();
+// Log("page = %P", page);
+ table[j] = page | 0x813;
+
+ tmp_page = (void*)MM_MapTemp(page);
+ memcpy(tmp_page, (void*)sp, 0x1000);
+ MM_FreeTemp( (tVAddr) tmp_page );
+ }
+
+ MM_FreeTemp( (tVAddr)table );
+ }
+
+ MM_FreeTemp( (tVAddr)new_lvl1_1 );
+ MM_FreeTemp( (tVAddr)new_lvl1_2 );
+
+ return ret;
+}
+
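+/**
+ * \brief Release all userland mappings (and spare kernel stacks) in
+ *        the current address space
+ */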
+void MM_ClearUser(void)
+{
+ int i, j;
+ const int user_table_count = USER_STACK_TOP / (256*0x1000);
+ Uint32 *cur = (void*)MM_TABLE0USER;
+ Uint32 *tab;
+
+// MM_DumpTables(0, 0x80000000);
+
+// Log("user_table_count = %i (as opposed to %i)", user_table_count, 0x800-4);
+
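+	// Walk the user first-level entries, dropping a reference on each
+	// mapped page and on the second-level tables themselves.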
+ for( i = 0; i < user_table_count; i ++ )
+ {
+ switch( cur[i] & 3 )
+ {
+ case 0: break; // Already unmapped
+ case 1: // Sub pages
+ tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
+ for( j = 0; j < 1024; j ++ )
+ {
+ switch( tab[j] & 3 )
+ {
+ case 0: break; // Unmapped
+ case 1:
+ Log_Error("MMVirt", "TODO: Support large pages in MM_ClearUser");
+ break;
+ case 2:
+ case 3:
+ MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) );
+ break;
+ }
+ }
+ MM_DerefPhys( cur[i] & ~(PAGE_SIZE-1) );
+ cur[i+0] = 0;
+ cur[i+1] = 0;
+ cur[i+2] = 0;
+ i += 3;
+ break;
+ case 2:
+ case 3:
+ Log_Error("MMVirt", "TODO: Implement sections/supersections in MM_ClearUser");
+ break;
+ }
+ cur[i] = 0;
+ }
+
+	// The final block of 4 tables is the kernel stack (KStack)
+ i = 0x800 - 4;
+
+ // Clear out unused stacks
+ {
+ register Uint32 __SP asm("sp");
+ int cur_stack_base = ((__SP & ~(MM_KSTACK_SIZE-1)) / PAGE_SIZE) % 1024;
+
+ tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32));
+
+		// The first 512 entries are the Table1 mapping, plus 2 for the Table0 mapping
+ for( j = 512+2; j < 1024; j ++ )
+ {
+ // Skip current stack
+ if( j == cur_stack_base ) {
+ j += (MM_KSTACK_SIZE / PAGE_SIZE) - 1;
+ continue ;
+ }
+ if( !(tab[j] & 3) ) continue;
+ ASSERT( (tab[j] & 3) == 2 );
+			MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) );
+ tab[j] = 0;
+ }
+ }
+
+// MM_DumpTables(0, 0x80000000);