+tPAddr MM_AllocateRootTable(void)
+{
+ tPAddr ret;
+
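+ // The root table needs two contiguous, 8 KiB aligned pages (ret & 0x1000
+ // set means the pair straddles an odd page). Try a two-page allocation
+ // first; if it is misaligned, retry with three pages so an aligned pair
+ // can be taken from the larger range.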
+ ret = MM_AllocPhysRange(2, -1);
+ if( ret & 0x1000 ) {
+ MM_DerefPhys(ret);
+ MM_DerefPhys(ret+0x1000);
+ ret = MM_AllocPhysRange(3, -1);
+ if( ret & 0x1000 ) {
+ MM_DerefPhys(ret);
+ ret += 0x1000;
+// Log("MM_AllocateRootTable: Second try not aligned, %P", ret);
+ }
+ else {
+ MM_DerefPhys(ret + 0x2000);
+// Log("MM_AllocateRootTable: Second try aligned, %P", ret);
+ }
+ }
+// else
+// Log("MM_AllocateRootTable: Got it in one, %P", ret);
+ return ret;
+}
+
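+// Clone one 4 MiB block of the user address space into a new set of tables.
+// A single physical page holds the four 1 KiB coarse tables for the block;
+// the 1024 second-level entries are copied, with fully writable small pages
+// re-marked read-only so they can be handled as copy-on-write later.
+// DestEnt[0..3] receive the four new first-level descriptors.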
+void MM_int_CloneTable(Uint32 *DestEnt, int Table)
+{
+ tPAddr table;
+ Uint32 *tmp_map;
+ Uint32 *cur = (void*)MM_TABLE1USER;
+// Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
+ int i;
+
+ table = MM_AllocPhys();
+ if(!table) return ;
+
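+ // Temporarily map the new page so its four coarse tables can be filled in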
+ tmp_map = (void*)MM_MapTemp(table);
+
+ for( i = 0; i < 1024; i ++ )
+ {
+// Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]);
+ switch(cur[Table*256+i] & 3)
+ {
+ case 0: tmp_map[i] = 0; break;
+ case 1:
+ tmp_map[i] = 0;
+ Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (Table*256+i)*0x1000);
+ // Large page?
+ break;
+ case 2:
+ case 3:
+ // Small page
+ // - If full RW
+ if( (cur[Table*256+i] & 0x230) == 0x030 )
+ cur[Table*256+i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO)
+ tmp_map[i] = cur[Table*256+i];
+ break;
+ }
+ }
+
+ // Point the four first-level descriptors at the four 1 KiB coarse tables
+ // packed into the new page (low bit set = coarse page table descriptor)
+ DestEnt[0] = table + 0*0x400 + 1;
+ DestEnt[1] = table + 1*0x400 + 1;
+ DestEnt[2] = table + 2*0x400 + 1;
+ DestEnt[3] = table + 3*0x400 + 1;
+
+ // Release the temporary mapping of the new table page
+ MM_FreeTemp( (tVAddr)tmp_map );
+}
+
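+// Clone the current address space into a new root table.
+// Copies the user first-level entries, clones each coarse table with
+// copy-on-write protection, rebuilds the fractal page-table mapping in the
+// top 4 MiB, and gives the new space its own copy of the current kernel
+// stack. Returns the physical address of the new root table.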
+tPAddr MM_Clone(void)
+{
+ tPAddr ret;
+ Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
+ Uint32 *tmp_map;
+ int i;
+
+// MM_DumpTables(0, KERNEL_BASE);
+
+ ret = MM_AllocateRootTable();
+ if( !ret ) return 0; // Out of physical memory for the new root table
+
+ cur = (void*)MM_TABLE0USER;
+ new_lvl1_1 = (void*)MM_MapTemp(ret);
+ new_lvl1_2 = (void*)MM_MapTemp(ret+0x1000);
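+ // Copy the user first-level entries (0x800 entries covering 2 GiB),
+ // excluding the last four, which are rebuilt as the fractal mapping below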
+ tmp_map = new_lvl1_1;
+ for( i = 0; i < 0x800-4; i ++ )
+ {
+ // HACK! Ignore the original identity mapping
+ if( i == 0 && Threads_GetTID() == 0 ) {
+ tmp_map[0] = 0;
+ continue;
+ }
+ if( i == 0x400 )
+ tmp_map = &new_lvl1_2[-0x400];
+ switch( cur[i] & 3 )
+ {
+ case 0: tmp_map[i] = 0; break;
+ case 1:
+ MM_int_CloneTable(&tmp_map[i], i);
+ i += 3; // Tables are allocated in blocks of 4
+ break;
+ case 2:
+ case 3:
+ Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i);
+ tmp_map[i] = 0;
+ break;
+ }
+ }
+
+ // Allocate Fractal table
+ {
+ int j, num;
+ tPAddr tmp = MM_AllocPhys();
+ Uint32 *table = (void*)MM_MapTemp(tmp);
+ Uint32 sp;
+ register Uint32 __SP asm("sp");
+
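+ // Build the fractal table: a coarse table whose entries map every other
+ // coarse table (and the new root table itself), so the new address space
+ // can reach and edit its own page tables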
+ // Map table to last 4MiB of user space
+ new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1;
+ new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1;
+ new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1;
+ new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1;
+
+ tmp_map = new_lvl1_1;
+ for( j = 0; j < 512; j ++ )
+ {
+ if( j == 256 )
+ tmp_map = &new_lvl1_2[-0x400];
+ if( (tmp_map[j*4] & 3) == 1 )
+ {
+ table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00;
+ table[j] |= 0x813; // nG, Kernel Only, Small page, XN
+ }
+ else
+ table[j] = 0;
+ }
+ // Fractal
+ table[j++] = (ret + 0x0000) | 0x813;
+ table[j++] = (ret + 0x1000) | 0x813;
+ // Nuke the rest
+ for( ; j < 1024; j ++ )
+ table[j] = 0;
+
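+ // The current kernel stack is mapped by this same coarse table; point its
+ // entries at freshly allocated pages and copy the live stack contents in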
+ // Get kernel stack bottom
+ sp = __SP & ~(MM_KSTACK_SIZE-1);
+ j = (sp / 0x1000) % 1024;
+ num = MM_KSTACK_SIZE/0x1000;
+
+ Log("num = %i, sp = %p, j = %i", num, sp, j);
+
+ // Copy stack pages
+ for(; num--; j ++, sp += 0x1000)
+ {
+ tPAddr page;
+ void *tmp_page;
+
+ page = MM_AllocPhys();
+ table[j] = page | 0x813;
+
+ tmp_page = (void*)MM_MapTemp(page);
+ memcpy(tmp_page, (void*)sp, 0x1000);
+ MM_FreeTemp( (tVAddr) tmp_page );
+ }
+
+ MM_FreeTemp( (tVAddr)table );
+ }
+
+ MM_FreeTemp( (tVAddr)new_lvl1_1 );
+ MM_FreeTemp( (tVAddr)new_lvl1_2 );
+
+ return ret;
+}
+