#define DEBUG 0
#include <acess.h>
#include <mm_virt.h>
+#include <threads_int.h>
#include <proc.h>
// === CONSTANTS ===
#define PAGEDIRPTR(idx) PAGEDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
#define PAGEMAPLVL4(idx) PAGEDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
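+// Writing another address space's PML4 (phys|3) here exposes all of its tables at MM_TMPFRAC_BASE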
+#define TMPCR3() PAGEMAPLVL4(MM_TMPFRAC_BASE>>39)
#define TMPTABLE(idx) (*((tPAddr*)MM_TMPFRAC_BASE+((idx)&PAGE_MASK)))
-#define TMPDIR(idx) TMPTABLE((MM_FRACTAL_BASE>>12)+((idx)&TABLE_MASK))
-#define TMPDIRPTR(idx) TMPDIR((MM_FRACTAL_BASE>>21)+((idx)&PDP_MASK))
-#define TMPMAPLVL4(idx) TMPDIRPTR((MM_FRACTAL_BASE>>30)+((idx)&PML4_MASK))
-#define TMPCR3() PAGETABLE(MM_TMPFRAC_BASE>>12)
+#define TMPDIR(idx) PAGETABLE((MM_TMPFRAC_BASE>>12)+((idx)&TABLE_MASK))
+#define TMPDIRPTR(idx) PAGEDIR((MM_TMPFRAC_BASE>>21)+((idx)&PDP_MASK))
+#define TMPMAPLVL4(idx) PAGEDIRPTR((MM_TMPFRAC_BASE>>30)+((idx)&PML4_MASK))
-#define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(__addr));
+#define INVLPG(__addr) __asm__ __volatile__ ("invlpg (%0)" :: "r"(__addr) : "memory")
+// Reloading CR3 flushes all non-global TLB entries (clobbers RAX, so tell the compiler)
+#define INVLPG_ALL() __asm__ __volatile__ ("mov %%cr3, %%rax\n\tmov %%rax, %%cr3" ::: "rax", "memory")
+// Toggling CR4.PGE (bit 7) flushes global TLB entries as well
+#define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %%cr4, %%rax\n\txorl $0x80, %%eax\n\tmov %%rax, %%cr4\n\txorl $0x80, %%eax\n\tmov %%rax, %%cr4" ::: "rax", "memory")
// === CONSTS ===
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
-// === EXTERNS ===
+// === IMPORTS ===
+extern void Error_Backtrace(Uint IP, Uint BP);
extern tPAddr gInitialPML4[512];
// === PROTOTYPES ===
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
+ int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_ClearUser(void);
void MM_FinishVirtualInit(void)
{
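+ // Drop PML4 entry 0 - the boot-time identity mapping is no longer needed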
+ PAGEMAPLVL4(0) = 0;
}
/**
&& gaPageTable[Addr>>12] & PF_COW )
{
tPAddr paddr;
- if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
+ if(MM_GetRefCount( gaPageTable[Addr>>12] & PADDR_MASK ) == 1)
{
gaPageTable[Addr>>12] &= ~PF_COW;
gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
{
//Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
paddr = MM_DuplicatePage( Addr );
- MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
+ MM_DerefPhys( gaPageTable[Addr>>12] & PADDR_MASK );
gaPageTable[Addr>>12] &= PF_USER;
gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
}
Log("Code at %p accessed %p", Regs->RIP, Addr);
// Print Stack Backtrace
-// Error_Backtrace(Regs->RIP, Regs->RBP);
+ Error_Backtrace(Regs->RIP, Regs->RBP);
MM_DumpTables(0, -1);
*/
void MM_DumpTables(tVAddr Start, tVAddr End)
{
+ #define CANONICAL(addr) ((addr)&0x800000000000?(addr)|0xFFFF000000000000:(addr)) // Sign-extend a 48-bit address
const tPAddr CHANGEABLE_BITS = 0xFF8;
const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
tVAddr rangeStart = 0;
- tPAddr expected = CHANGEABLE_BITS; // MASK is used because it's not a vaild value
+ tPAddr expected = CHANGEABLE_BITS; // CHANGEABLE_BITS is used because it's not a valid value
tVAddr curPos;
Uint page;
|| (PAGETABLE(page) & MASK) != expected)
{
if(expected != CHANGEABLE_BITS) {
- Log("%016x-0x%016x => %013x-%013x (%c%c%c%c)",
- rangeStart, curPos - 1,
- PAGETABLE(rangeStart>>12) & ~0xFFF,
- (expected & ~0xFFF) - 1,
+ Log("%016llx => %013llx : 0x%6llx (%c%c%c%c)",
+ CANONICAL(rangeStart),
+ PAGETABLE(rangeStart>>12) & PADDR_MASK,
+ curPos - rangeStart,
(expected & PF_PAGED ? 'p' : '-'),
(expected & PF_COW ? 'C' : '-'),
(expected & PF_USER ? 'U' : '-'),
}
if(expected != CHANGEABLE_BITS) {
- Log("%016x-%016x => %013x-%013x (%s%s%s%s)",
- rangeStart, curPos - 1,
- PAGETABLE(rangeStart>>12) & ~0xFFF,
- (expected & ~0xFFF) - 1,
- (expected & PF_PAGED ? "p" : "-"),
- (expected & PF_COW ? "C" : "-"),
- (expected & PF_USER ? "U" : "-"),
- (expected & PF_WRITE ? "W" : "-")
+ Log("%016llx => %013llx : 0x%6llx (%c%c%c%c)",
+ CANONICAL(rangeStart),
+ PAGETABLE(rangeStart>>12) & PADDR_MASK,
+ curPos - rangeStart,
+ (expected & PF_PAGED ? 'p' : '-'),
+ (expected & PF_COW ? 'C' : '-'),
+ (expected & PF_USER ? 'U' : '-'),
+ (expected & PF_WRITE ? 'W' : '-')
);
expected = 0;
}
+ #undef CANONICAL
}
/**
int i;
if( bTemp )
- pmlevels[3] = (void*)MM_TMPFRAC_BASE; // Temporary Page Table
+ {
+ pmlevels[3] = &TMPTABLE(0); // Page Table
+ pmlevels[2] = &TMPDIR(0); // PDIR
+ pmlevels[1] = &TMPDIRPTR(0); // PDPT
+ pmlevels[0] = &TMPMAPLVL4(0); // PML4
+ }
else
+ {
pmlevels[3] = (void*)MM_FRACTAL_BASE; // Page Table
- pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&PAGE_MASK]; // PDIR
- pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&TABLE_MASK]; // PDPT
- pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&PDP_MASK]; // PML4
-// Log("pmlevels = {%p, %p, %p, %p}",
-// MM_FRACTAL_BASE>>30, MM_FRACTAL_BASE>>21, MM_FRACTAL_BASE>>12, MM_FRACTAL_BASE);
-// Log("pmlevels = {%p, %p, %p, %p}",
-// pmlevels[0], pmlevels[1], pmlevels[2], pmlevels[3]);
+ pmlevels[2] = &pmlevels[3][(MM_FRACTAL_BASE>>12)&PAGE_MASK]; // PDIR
+ pmlevels[1] = &pmlevels[2][(MM_FRACTAL_BASE>>21)&TABLE_MASK]; // PDPT
+ pmlevels[0] = &pmlevels[1][(MM_FRACTAL_BASE>>30)&PDP_MASK]; // PML4
+ }
// Mask address
Addr &= (1ULL << 48)-1;
for( i = 0; i < nADDR_SIZES-1; i ++ )
{
// Check for a large page
if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) == 0 && bLargePage )
if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
return ADDR_SIZES[i];
}
-// Log("&pmlevels[%i][0x%llx (>> %i)] = %p", i, Addr >> ADDR_SIZES[i], ADDR_SIZES[i],
-// &pmlevels[i][Addr >> ADDR_SIZES[i]]);
// Allocate an entry if required
if( !(pmlevels[i][Addr >> ADDR_SIZES[i]] & 1) )
{
tmp = MM_AllocPhys();
if(!tmp) return -2;
pmlevels[i][Addr >> ADDR_SIZES[i]] = tmp | 3;
- INVLPG( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])<<9 ] );
- memset( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])<<9 ], 0, 0x1000 );
+ INVLPG( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ] );
+ memset( &pmlevels[i+1][ (Addr>>ADDR_SIZES[i])*512 ], 0, 0x1000 );
}
+ // Catch large pages
else if( pmlevels[i][Addr >> ADDR_SIZES[i]] & PF_LARGE )
{
- if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) != 0 )
- return -3; // Alignment
+ // Alignment
+ if( (Addr & ((1ULL << ADDR_SIZES[i])-1)) != 0 ) return -3;
if(Pointer) *Pointer = &pmlevels[i][Addr >> ADDR_SIZES[i]];
return ADDR_SIZES[i]; // Large page warning
}
/**
* \brief Map a physical page to a virtual one
+ * \param VAddr Target virtual address
+ * \param PAddr Physical address of page
+ * \param bTemp Use temporary mappings
+ * \param bLarge Treat as a large page
*/
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
{
tPAddr *ent;
int rv;
ENTER("xVAddr xPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
- rv = MM_GetPageEntryPtr(VAddr, 0, 1, 0, &ent);
+ rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
- if( *ent & 1 )
- LEAVE_RET('i', 0);
+ if( *ent & 1 ) LEAVE_RET('i', 0);
*ent = PAddr | 3;
- return 1;
+ LEAVE_RET('i', 1); // Balance the ENTER trace
}
+/**
+ * \brief Map a physical page to a virtual one
+ * \param VAddr Target virtual address
+ * \param PAddr Physical address of page
+ */
+int MM_Map(tVAddr VAddr, tPAddr PAddr)
+{
+ return MM_MapEx(VAddr, PAddr, 0, 0);
+}
+
/**
- * \brief Removed a mapped page
+ * \brief Remove a mapped page
*/
ENTER("xVAddr", VAddr);
- // NOTE: This is hack, but I like my dumps to be neat
- #if 1
+ // Ensure the tables are allocated before the page (keeps things neat)
MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
- #elif 1
- if( !MM_Map(VAddr, 0) ) // Make sure things are allocated
- {
- Warning("MM_Allocate: Unable to map, tables did not initialise");
- LEAVE('i', 0);
- return 0;
- }
- MM_Unmap(VAddr);
- #endif
+ // Allocate the page
ret = MM_AllocPhys();
LOG("ret = %x", ret);
- if(!ret) {
- LEAVE('i', 0);
- return 0;
- }
+ if(!ret) LEAVE_RET('i', 0);
- if( !MM_Map(VAddr, ret) )
- {
- return 0;
- }
+ if( !MM_Map(VAddr, ret) )
+ {
+ MM_DerefPhys(ret); // Don't leak the frame if the mapping fails
+ LEAVE_RET('i', 0);
+ }
- LEAVE('x', ret);
+ LEAVE('X', ret);
return ret;
}
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
- *Phys = *ptr & ~0xFFF;
+ *Phys = *ptr & PADDR_MASK;
*Flags = *ptr & 0xFFF;
return ret;
}
ret = MM_GetPageEntryPtr(Addr, 0, 0, 0, &ptr);
if( ret < 0 ) return 0;
- return (*ptr & ~0xFFF) | (Addr & 0xFFF);
+ return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
}
/**
}
if( num >= 0 ) continue;
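+ // Advance PAddr to the end of the range, then map from the last page backwards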
+ PAddr += 0x1000 * Number;
+
while( Number -- )
{
ret -= 0x1000;
+ PAddr -= 0x1000;
MM_Map(ret, PAddr);
- PAddr += 0x1000;
}
return ret;
tPAddr MM_Clone(void)
{
tPAddr ret;
+ int i;
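+ // Lowest mapped page of the current kernel stack (the page below is the unmapped guard)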
+ tVAddr kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE + 0x1000;
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
+ if( !ret ) return 0; // Out of physical memory
// #2 Alter the fractal pointer
Mutex_Acquire(&glMM_TempFractalLock);
TMPCR3() = ret | 3;
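+ // The whole temp fractal range now refers to the new PML4, so flush everything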
+ INVLPG_ALL();
- INVLPG(TMPMAPLVL4(0));
- memcpy(&TMPMAPLVL4(0), &PAGEMAPLVL4(0), 0x1000);
-
- Log_KernelPanic("MM", "TODO: Implement MM_Clone");
// #3 Set Copy-On-Write to all user pages
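+ // NOTE: Only the child's entries are marked COW here; the parent's keep write access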
- // #4 Return
+ for( i = 0; i < 256; i ++)
+ {
+ TMPMAPLVL4(i) = PAGEMAPLVL4(i);
+// Log_Debug("MM", "TMPMAPLVL4(%i) = 0x%016llx", i, TMPMAPLVL4(i));
+ if( TMPMAPLVL4(i) & 1 )
+ {
+ MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
+ TMPMAPLVL4(i) |= PF_COW;
+ TMPMAPLVL4(i) &= ~PF_WRITE;
+ }
+ }
+
+ // #4 Map in kernel pages
+ for( i = 256; i < 512; i ++ )
+ {
+ // Skip addresses:
+ // 320 0xFFFFA.... - Kernel Stacks
+ if( i == 320 ) continue;
+ // 509 0xFFFFFE0.. - Fractal mapping
+ if( i == 509 ) continue;
+ // 510 0xFFFFFE8.. - Temp fractal mapping
+ if( i == 510 ) continue;
+
+ // Kernel space is shared, not copied - just take a reference to each present table
+ TMPMAPLVL4(i) = PAGEMAPLVL4(i);
+ if( TMPMAPLVL4(i) & 1 )
+ MM_RefPhys( TMPMAPLVL4(i) & PADDR_MASK );
+ }
+
+ // #5 Set fractal mapping
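+ // (entry 509 must point at the child's own PML4, not the parent's)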
+ TMPMAPLVL4(509) = ret | 3;
+ TMPMAPLVL4(510) = 0; // Temp
+
+ // #6 Create kernel stack
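+ // (entry 320 starts empty so MM_MapEx allocates fresh stack tables in the child)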
+ TMPMAPLVL4(320) = 0;
+ for( i = 0; i < KERNEL_STACK_SIZE/0x1000-1; i ++ )
+ {
+ tPAddr phys = MM_AllocPhys();
+ tVAddr tmpmapping;
+ MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+
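+ // Copy the parent's stack contents into the child's pages via a temporary mapping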
+ tmpmapping = MM_MapTemp(phys);
+ memcpy((void*)tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+ MM_FreeTemp(tmpmapping);
+ }
+
+ // #7 Return
TMPCR3() = 0;
- INVLPG(TMPMAPLVL4(0));
+ INVLPG_ALL();
Mutex_Release(&glMM_TempFractalLock);
- return 0;
+ return ret;
}
void MM_ClearUser(void)
{
tVAddr addr = 0;
- // #1 Traverse the structure < 2^47, Deref'ing all pages
- // #2 Free tables/dirs/pdps once they have been cleared
+ int pml4, pdpt, pd, pt;
- for( addr = 0; addr < 0x800000000000; )
+ for( pml4 = 0; pml4 < 256; pml4 ++ )
{
- if( PAGEMAPLVL4(addr >> PML4_SHIFT) & 1 )
+ // Catch an un-allocated PML4 entry
+ if( !(PAGEMAPLVL4(pml4) & 1) ) {
+ addr += 1ULL << PML4_SHIFT;
+ continue ;
+ }
+
+ // Catch a large COW
+ if( (PAGEMAPLVL4(pml4) & PF_COW) ) {
+ addr += 1ULL << PML4_SHIFT;
+ }
+ else
{
- if( PAGEDIRPTR(addr >> PDP_SHIFT) & 1 )
+ // TODO: Large pages
+
+ // Child entries
+ for( pdpt = 0; pdpt < 512; pdpt ++ )
{
+ // Entry index, saved now because the loop body advances addr
+ Uint pdp_idx = addr >> PDP_SHIFT;
- if( PAGEDIR(addr >> PDIR_SHIFT) & 1 )
- {
- // Page
- if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
- MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
- PAGETABLE(addr >> PTAB_SHIFT) = 0;
- }
- addr += 1 << PTAB_SHIFT;
- // Dereference the PDIR Entry
- if( (addr + (1 << PTAB_SHIFT)) >> PDIR_SHIFT != (addr >> PDIR_SHIFT) ) {
- MM_DerefPhys( PAGEMAPLVL4(addr >> PDIR_SHIFT) & PADDR_MASK );
- PAGEDIR(addr >> PDIR_SHIFT) = 0;
- }
- }
- else {
- addr += 1 << PDIR_SHIFT;
+ // Unallocated
+ if( !(PAGEDIRPTR(addr >> PDP_SHIFT) & 1) ) {
+ addr += 1ULL << PDP_SHIFT;
continue;
}
- // Dereference the PDP Entry
- if( (addr + (1 << PDIR_SHIFT)) >> PDP_SHIFT != (addr >> PDP_SHIFT) ) {
- MM_DerefPhys( PAGEMAPLVL4(addr >> PDP_SHIFT) & PADDR_MASK );
- PAGEDIRPTR(addr >> PDP_SHIFT) = 0;
+
+ // Catch a large COW
+ if( (PAGEDIRPTR(addr >> PDP_SHIFT) & PF_COW) ) {
+ addr += 1ULL << PDP_SHIFT;
}
- }
- else {
- addr += 1 << PDP_SHIFT;
- continue;
- }
- // Dereference the PML4 Entry
- if( (addr + (1 << PDP_SHIFT)) >> PML4_SHIFT != (addr >> PML4_SHIFT) ) {
- MM_DerefPhys( PAGEMAPLVL4(addr >> PML4_SHIFT) & PADDR_MASK );
- PAGEMAPLVL4(addr >> PML4_SHIFT) = 0;
+ else {
+ // Child entries
+ for( pd = 0; pd < 512; pd ++ )
+ {
+ // Entry index, saved now because the loop body advances addr
+ Uint pd_idx = addr >> PDIR_SHIFT;
+ // Unallocated PDir entry
+ if( !(PAGEDIR(addr >> PDIR_SHIFT) & 1) ) {
+ addr += 1ULL << PDIR_SHIFT;
+ continue;
+ }
+
+ // COW Page Table
+ if( PAGEDIR(addr >> PDIR_SHIFT) & PF_COW ) {
+ addr += 1ULL << PDIR_SHIFT;
+ }
+ else
+ {
+ // TODO: Catch large pages
+
+ // Child entries
+ for( pt = 0; pt < 512; pt ++ )
+ {
+ // Free page
+ if( PAGETABLE(addr >> PTAB_SHIFT) & 1 ) {
+ MM_DerefPhys( PAGETABLE(addr >> PTAB_SHIFT) & PADDR_MASK );
+ PAGETABLE(addr >> PTAB_SHIFT) = 0;
+ }
+ addr += 1ULL << 12;
+ }
+ }
+ // Free this entry's page table (addr has already moved past it, so use the saved index)
+ MM_DerefPhys( PAGEDIR(pd_idx) & PADDR_MASK );
+ PAGEDIR(pd_idx) = 0;
+ }
+ }
+ // Free this entry's page directory (addr has already moved past it, so use the saved index)
+ MM_DerefPhys( PAGEDIRPTR(pdp_idx) & PADDR_MASK );
+ PAGEDIRPTR(pdp_idx) = 0;
}
}
- else {
- addr += (tVAddr)1 << PML4_SHIFT;
- continue;
- }
+ // Free page directory pointer table (PML4 entry)
+ MM_DerefPhys( PAGEMAPLVL4(pml4) & PADDR_MASK );
+ PAGEMAPLVL4(pml4) = 0;
}
}
tVAddr ret;
int i;
- Log_KernelPanic("MM", "TODO: Implement MM_NewWorkerStack");
-
// #1 Set temp fractal to PID0
Mutex_Acquire(&glMM_TempFractalLock);
TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
- // - This acts as as guard page, and doesn't cost us anything.
+ // - This acts as a guard page, and doesn't cost us anything.
for( i = 0; i < KERNEL_STACK_SIZE/0x1000 - 1; i ++ )
{
-// MM_MapTemp
+ tPAddr phys = MM_AllocPhys();
+ if(!phys) {
+ // TODO: Clean up pages already allocated for this stack
+ Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
+ Mutex_Release(&glMM_TempFractalLock); // Don't return with the lock held
+ return 0;
+ }
+ MM_MapEx(ret + i*0x1000, phys, 1, 0);
}
Mutex_Release(&glMM_TempFractalLock);
- return 0;
+ return ret + i*0x1000; // Top of the new stack (i == KERNEL_STACK_SIZE/0x1000 - 1 here)
}
/**