+ for( i = 0; i < 256; i ++)
+ {
+ TMPMAPLVL4(i) = PAGEMAPLVL4(i);
+// Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
+ if( TMPMAPLVL4(i) & 1 )
+ {
+ MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
+ TMPMAPLVL4(i) |= PF_COW;
+ TMPMAPLVL4(i) &= ~PF_WRITE;
+ // The parent's entry must be made COW/read-only as well, otherwise
+ // parent-side writes would go through the shared tables without faulting
+ PAGEMAPLVL4(i) = TMPMAPLVL4(i);
+ }
+ }
+
+ // #4 Map in kernel pages
+ for( i = 256; i < 512; i ++ )
+ {
+ // Skip addresses:
+ // 320 0xFFFFA.... - Kernel Stacks
+ if( i == 320 ) continue;
+ // 508 0xFFFFFE0.. - Fractal mapping
+ if( i == 508 ) continue;
+ // 509 0xFFFFFE8.. - Temp fractal mapping
+ if( i == 509 ) continue;
+
+ TMPMAPLVL4(i) = PAGEMAPLVL4(i);
+ if( TMPMAPLVL4(i) & 1 )
+ MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
+ }
+
+ // #5 Set fractal mapping
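+ // ('ret' is the physical address of the new PML4, so entry 508 maps
+ // the paging structures onto themselves; 3 = Present|Writable)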
+ TMPMAPLVL4(508) = ret | 3;
+ TMPMAPLVL4(509) = 0; // Temp
+
+ // #6 Create kernel stack
+ // tThread->KernelStack is the top
+ // There is 1 guard page below the stack
+ kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+
+// Log("MM_Clone: kstackbase = %p", kstackbase);
+
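+ // Entry 320 (kernel stacks) was skipped by the copy above; clear it so
+ // the child starts with no stack mappings before its own stack is built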
+ TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
+ for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
+ {
+ tPAddr phys = MM_AllocPhys();
+ tVAddr tmpmapping;
+ if( !phys ) {
+ // TODO: Free the partially-cloned address space
+ Log_Error("MM", "MM_Clone - Unable to allocate kernel stack page");
+ TMPCR3() = 0;
+ INVLPG_ALL();
+ Mutex_Release(&glMM_TempFractalLock);
+ return 0;
+ }
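+ // Map the new page into the child's kernel stack (bTemp=1 selects the
+ // temporary fractal, i.e. the address space being built)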
+ MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+
+ Log_Debug("MM", "MM_Clone: Cloning stack page %p from %P to %P",
+ kstackbase+i*0x1000, MM_GetPhysAddr( kstackbase+i*0x1000 ), phys
+ );
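+ // Access the new physical page through a temporary mapping, then
+ // either copy the live stack contents across or zero-fill it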
+ tmpmapping = MM_MapTemp(phys);
+ if( MM_GetPhysAddr( kstackbase+i*0x1000 ) )
+ memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+ else
+ memset((void*)tmpmapping, 0, 0x1000);
+// if( i == 0xF )
+// Debug_HexDump("MM_Clone: *tmpmapping = ", (void*)tmpmapping, 0x1000);
+ MM_FreeTemp(tmpmapping);
+ }
+
+// MAGIC_BREAK();
+
+ // #7 Return
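+ // Drop the temp mapping of the new address space and flush stale TLB entries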
+ TMPCR3() = 0;
+ INVLPG_ALL();
+ Mutex_Release(&glMM_TempFractalLock);
+// Log("MM_Clone: RETURN %P", ret);
+ return ret;
+}
+
+void MM_ClearUser(void)
+{
+ tVAddr addr = 0;
+ int pml4, pdpt, pd, pt;
+
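+ // Walk the user half of the address space (PML4 entries 0-255),
+ // dereferencing every mapped page and paging structure. A COW entry
+ // is shared with another process, so it is not descended into; only
+ // its own reference is dropped.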
+ for( pml4 = 0; pml4 < 256; pml4 ++ )
+ {
+ // Catch an un-allocated PML4 entry
+ if( !(PAGEMAPLVL4(pml4) & 1) ) {
+ addr += 1ULL << PML4_SHIFT;
+ continue ;
+ }
+
+ // Catch a large COW
+ if( (PAGEMAPLVL4(pml4) & PF_COW) ) {
+ addr += 1ULL << PML4_SHIFT;
+ }
+ else
+ {
+ // TODO: Large pages
+
+ // Child entries
+ for( pdpt = 0; pdpt < 512; pdpt ++ )
+ {
+ // Unallocated
+ if( !(PAGEDIRPTR(addr >> PDP_SHIFT) & 1) ) {
+ addr += 1ULL << PDP_SHIFT;
+ continue;
+ }
+
+ // Catch a large COW
+ if( (PAGEDIRPTR(addr >> PDP_SHIFT) & PF_COW) ) {
+ addr += 1ULL << PDP_SHIFT;
+ }
+ else {
+ // Child entries
+ for( pd = 0; pd < 512; pd ++ )
+ {
+ // Unallocated PDir entry
+ if( !(PAGEDIR(addr >> PDIR_SHIFT) & 1) ) {
+ addr += 1ULL << PDIR_SHIFT;
+ continue;
+ }
+
+ // COW Page Table
+ if( PAGEDIR(addr >> PDIR_SHIFT) & PF_COW ) {
+ addr += 1ULL << PDIR_SHIFT;
+ }
+ else
+ {
+ // TODO: Catch large pages
+
+ // Child entries
+ for( pt = 0; pt < 512; pt ++ )
+ {
+ // Free page
+ if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
+ MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
+ PAGETABLE(addr >> PTAB_SHIFT) = 0;
+ }
+ addr += 1ULL << 12;
+ }
+ }
+ // Free page table
+ // ('addr' has already advanced past this 2MiB region, so index
+ // the entry that was just processed)
+ MM_DerefPhys( PAGEDIR((addr >> PDIR_SHIFT) - 1) & PADDR_MASK );
+ PAGEDIR((addr >> PDIR_SHIFT) - 1) = 0;
+ }
+ }
+ // Free page directory (again, 'addr' now points past this 1GiB region)
+ MM_DerefPhys( PAGEDIRPTR((addr >> PDP_SHIFT) - 1) & PADDR_MASK );
+ PAGEDIRPTR((addr >> PDP_SHIFT) - 1) = 0;
+ }
+ }
+ // Free page directory pointer table (PML4 entry)
+ MM_DerefPhys( PAGEMAPLVL4(pml4) & PADDR_MASK );
+ PAGEMAPLVL4(pml4) = 0;
+ }
+}
+
+tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
+{
+ tVAddr ret;
+ int i;
+
+ // #1 Set temp fractal to PID0
+ Mutex_Acquire(&glMM_TempFractalLock);
+ TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
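+ // (subtracting KERNEL_BASE converts gInitialPML4 to its physical
+ // address; 3 = Present|Writable)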
+
+ // #2 Scan for a free stack address < 2^47
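+ // A slot is treated as free if its first page has no paging structures
+ // (the MM_GetPageEntryPtr call fails) or its entry is not present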
+ for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
+ {
+ tPAddr *ptr;
+ if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) == 0 ) break;
+ if( !(*ptr & 1) ) break;
+ }
+ if( ret >= (1ULL << 47) ) {
+ Mutex_Release(&glMM_TempFractalLock);
+ return 0;
+ }
+
+ // #3 Map all but the last page in the range
+ // - The unmapped top page acts as a guard page, and doesn't cost us anything.
+ for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
+ {
+ tPAddr phys = MM_AllocPhys();
+ if(!phys) {
+ // TODO: Clean up the pages already mapped
+ Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
+ Mutex_Release(&glMM_TempFractalLock);
+ return 0;
+ }
+ MM_MapEx(ret + i*0x1000, phys, 1, 0);
+ }
+
+ if( StackSize > 0x1000 ) {
+ Log_Error("MM", "MM_NewWorkerStack: StackSize(0x%x) > 0x1000, not supported", StackSize);
+ }
+ else {
+ tPAddr *ptr, paddr;
+ tVAddr tmp_addr;
+ // NOTE: copy goes into the last *mapped* page; page 'i' is the unmapped guard
+ MM_GetPageEntryPtr(ret + (i-1)*0x1000, 1, 0, 0, &ptr);
+ paddr = *ptr & ~0xFFF;
+ tmp_addr = MM_MapTemp(paddr);
+ memcpy( (void*)(tmp_addr + (0x1000 - StackSize)), StackData, StackSize );
+ MM_FreeTemp(tmp_addr);
+ }
+
+ Mutex_Release(&glMM_TempFractalLock);
+
+ return ret + i*0x1000;
+}
+
+/**
+ * \brief Allocate a new kernel stack
+ */
+tVAddr MM_NewKStack(void)
+{
+ tVAddr base = MM_KSTACK_BASE;
+ Uint i;
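+ // Scan the kernel stack region for a free slot, identified by an
+ // unmapped top page. Page 0 of each slot is left unallocated below,
+ // acting as a guard page.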
+ for( ; base < MM_KSTACK_TOP; base += KERNEL_STACK_SIZE )
+ {
+ if(MM_GetPhysAddr(base+KERNEL_STACK_SIZE-0x1000) != 0)
+ continue;
+
+ //Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
+ for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
+ {
+ if( !MM_Allocate(base+i) )
+ {
+ Log_Warning("MM", "MM_NewKStack - Allocation failed");
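+ // Unwind the pages allocated so far (the guard page was never allocated)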
+ for( i -= 0x1000; i; i -= 0x1000)
+ MM_Deallocate(base+i);
+ return 0;
+ }
+ }
+
+ return base + KERNEL_STACK_SIZE;
+ }
+ Log_Warning("MM", "MM_NewKStack - No address space left");