#define INVLPG_ALL() __asm__ __volatile__ ("mov %cr3,%rax;\n\tmov %rax,%cr3;" : : : "rax", "memory")
#define INVLPG_GLOBAL() __asm__ __volatile__ ("mov %cr4,%rax;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4;\n\txorl $0x80, %eax;\n\tmov %rax,%cr4" : : : "rax", "memory")
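+// INVLPG_ALL reloads CR3, flushing all non-global TLB entries;
+// INVLPG_GLOBAL toggles CR4.PGE (bit 7, 0x80) twice, which also
+// flushes global entries.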
+// TODO: INVLPG_ALL is expensive
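+// Claim the temporary fractal slot: spin until TMPCR3() is free, then
+// install `cr3` (present|writable). Interrupts stay disabled until
+// REL_TEMP_MAPPING() releases the slot.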
+#define GET_TEMP_MAPPING(cr3) do { \
+ __ASM__("cli"); \
+ __AtomicTestSetLoop( (Uint *)&TMPCR3(), (cr3) | 3 ); \
+ INVLPG_ALL(); \
+} while(0)
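+// Release the slot and re-enable interrupts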
+#define REL_TEMP_MAPPING() do { \
+ TMPCR3() = 0; \
+ __ASM__("sti"); \
+} while(0)
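+// Minimal usage sketch (the pattern MM_Clone and MM_GetPageFromAS use
+// below; `other_cr3` is a hypothetical physical address):
+//  GET_TEMP_MAPPING(other_cr3);
+//  ... access the other address space, e.g. via TMPMAPLVL4() ...
+//  REL_TEMP_MAPPING();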
+
// === CONSTS ===
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
//void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
- int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr);
+ int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// === GLOBALS ===
-tMutex glMM_TempFractalLock;
+tShortSpinlock glMM_ZeroPage;
tPAddr gMM_ZeroPage;
// === CODE ===
// Print Stack Backtrace
Error_Backtrace(Regs->RIP, Regs->RBP);
- MM_DumpTables(0, -1);
+ //MM_DumpTables(0, -1);
return 1;
}
const tPAddr MASK = ~CHANGEABLE_BITS; // Physical address and access bits
tVAddr rangeStart = 0;
	tPAddr	expected = CHANGEABLE_BITS;	// CHANGEABLE_BITS is used because it's not a valid value
- tVAddr curPos;
- Uint page;
tPAddr expected_pml4 = PF_WRITE|PF_USER;
tPAddr expected_pdp = PF_WRITE|PF_USER;
tPAddr expected_pd = PF_WRITE|PF_USER;
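+	// Mask End to the 48-bit virtual address limit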
End &= (1L << 48) - 1;
- Start >>= 12; End >>= 12;
+ Start >>= 12;
+ End >>= 12;
- for(page = Start, curPos = Start<<12;
- page < End;
- curPos += 0x1000, page++)
+	// `page` cannot overflow: End fits in 36 bits (48-bit VA >> 12)
+ tVAddr curPos = Start << 12;
+ for(Uint page = Start; page <= End; curPos += 0x1000, page++)
{
//Debug("&PAGEMAPLVL4(%i page>>27) = %p", page>>27, &PAGEMAPLVL4(page>>27));
//Debug("&PAGEDIRPTR(%i page>>18) = %p", page>>18, &PAGEDIRPTR(page>>18));
 * \param bTemp	Use temporary mappings
* \param bLarge Treat as a large page
*/
-int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
+int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
{
tPAddr *ent;
int rv;
ENTER("pVAddr PPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
- rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
if( *ent & 1 ) LEAVE_RET('i', 0);
*ent = PAddr | 3;
- if( VAddr < 0x800000000000 )
+ if( (tVAddr)VAddr <= USER_MAX )
*ent |= PF_USER;
-
INVLPG( VAddr );
LEAVE('i', 1);
* \param VAddr Target virtual address
* \param PAddr Physical address of page
*/
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
{
return MM_MapEx(VAddr, PAddr, 0, 0);
}
// Check Page Dir
if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
- PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
+ tPAddr *ent = &PAGETABLE(VAddr >> PTAB_SHIFT);
+ *ent = 0;
INVLPG( VAddr );
}
/**
* \brief Allocate a block of memory at the specified virtual address
*/
-tPAddr MM_Allocate(tVAddr VAddr)
+tPAddr MM_Allocate(volatile void *VAddr)
{
tPAddr ret;
- ENTER("xVAddr", VAddr);
+ ENTER("pVAddr", VAddr);
// Ensure the tables are allocated before the page (keeps things neat)
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+ MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 1, 0, NULL );
// Allocate the page
ret = MM_AllocPhys();
return ret;
}
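+// All zeroed allocations initially share one physical zero page, mapped
+// read-only with PF_COW set; the first write faults and the COW handler
+// (not shown in this section) gives the caller a private copy.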
-tPAddr MM_AllocateZero(tVAddr VAddr)
+void MM_AllocateZero(volatile void *VAddr)
{
- tPAddr ret = gMM_ZeroPage;
-
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+	// Ensure the paging structures are populated before mapping
+ MM_GetPageEntryPtr((tVAddr)VAddr, 0, 1, 0, NULL);
- if(!gMM_ZeroPage) {
- ret = gMM_ZeroPage = MM_AllocPhys();
- MM_RefPhys(ret); // Don't free this please
- MM_Map(VAddr, ret);
- memset((void*)VAddr, 0, 0x1000);
+ if(!gMM_ZeroPage)
+ {
+ SHORTLOCK(&glMM_ZeroPage);
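+		// Re-check under the lock, another CPU may have initialised the page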
+ if( !gMM_ZeroPage )
+ {
+			tPAddr	phys = MM_AllocPhys();
+			MM_Map(VAddr, phys);
+			memset((void*)VAddr, 0, PAGE_SIZE);
+			// Publish only once zeroed, so racing readers never see a dirty page
+			gMM_ZeroPage = phys;
+ }
+ SHORTREL(&glMM_ZeroPage);
}
- else {
- MM_Map(VAddr, ret);
+ else
+ {
+ MM_Map(VAddr, gMM_ZeroPage);
}
- MM_RefPhys(ret); // Refernce for this map
+	MM_RefPhys(gMM_ZeroPage);	// Reference for this mapping
MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
- return ret;
}
/**
* \brief Deallocate a page at a virtual address
*/
-void MM_Deallocate(tVAddr VAddr)
+void MM_Deallocate(volatile void *VAddr)
{
- tPAddr phys;
-
- phys = MM_GetPhysAddr( (void*)VAddr );
+ tPAddr phys = MM_GetPhysAddr( VAddr );
if(!phys) return ;
- MM_Unmap(VAddr);
+ MM_Unmap((tVAddr)VAddr);
MM_DerefPhys(phys);
}
return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
}
+/**
+ * \brief Get the address of a page from another address space
+ * \return Referenced physical address (or 0 on error)
+ */
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr)
+{
+ GET_TEMP_MAPPING(Process->MemState.CR3);
+ tPAddr ret = 0;
+ tPAddr *ptr;
+	if( MM_GetPageEntryPtr((tVAddr)Addr, 1, 0, 0, &ptr) == 0 )	// Temp, NoAlloc, NotLarge
+ {
+ if( *ptr & 1 )
+ {
+			// PADDR_MASK avoids leaking the NX/flag bits into the address
+			ret = (*ptr & PADDR_MASK) | ((tVAddr)Addr & 0xFFF);
+ MM_RefPhys( ret );
+ }
+ }
+ REL_TEMP_MAPPING();
+ return ret;
+}
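+// Expected call pattern (a sketch; `proc` and `addr` are hypothetical):
+//  tPAddr p = MM_GetPageFromAS(proc, addr);
+//  if( p ) {
+//      ... use the page ...
+//      MM_DerefPhys(p);  // drop the reference taken above
+//  }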
+
/**
* \brief Sets the flags on a page
*/
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
{
tPAddr *ent;
int rv;
// Get pointer
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return ;
// Ensure the entry is valid
if( Flags & MM_PFLAG_COW ) {
*ent &= ~PF_WRITE;
*ent |= PF_COW;
- INVLPG_ALL();
}
else {
*ent &= ~PF_COW;
/**
* \brief Get the flags applied to a page
*/
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(volatile const void *VAddr)
{
tPAddr *ent;
int rv, ret = 0;
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr((tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return 0;
if( !(*ent & 1) ) return 0;
*/
void *MM_MapHWPages(tPAddr PAddr, Uint Number)
{
- tVAddr ret;
- int num;
-
//TODO: Add speedups (memory of first possible free)
- for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
+ for( tPage *ret = (void*)MM_HWMAP_BASE; ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
+ // Check if this region has already been used
+ int num;
+ for( num = Number; num -- && ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- if( MM_GetPhysAddr( (void*)ret ) != 0 )
+ if( MM_GetPhysAddr( ret ) != 0 )
break;
}
if( num >= 0 ) continue;
// Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
+		// Map backwards (because `ret` is at the top of the region at this point)
PAddr += 0x1000 * Number;
-
while( Number -- )
{
- ret -= 0x1000;
+ ret --;
PAddr -= 0x1000;
MM_Map(ret, PAddr);
MM_RefPhys(PAddr);
}
- return (void*)ret;
+ return ret;
}
Log_Error("MM", "MM_MapHWPages - No space for %i pages", Number);
/**
* \brief Free a range of hardware pages
*/
-void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(volatile void *VAddr, Uint Number)
{
// Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
+ tPage *page = (void*)VAddr;
while( Number -- )
{
- MM_DerefPhys( MM_GetPhysAddr((void*)VAddr) );
- MM_Unmap(VAddr);
- VAddr += 0x1000;
+ MM_DerefPhys( MM_GetPhysAddr(page) );
+ MM_Unmap((tVAddr)page);
+ page ++;
}
}
void *ret;
// Sanity Check
- if(MaxBits < 12 || !PhysAddr) return 0;
+ ASSERTCR(MaxBits, >=, 12, NULL);
// Fast Allocate
if(Pages == 1 && MaxBits >= PHYS_BITS)
{
phys = MM_AllocPhys();
- *PhysAddr = phys;
ret = MM_MapHWPages(phys, 1);
MM_DerefPhys(phys);
+ if(PhysAddr)
+ *PhysAddr = phys;
return ret;
}
// Allocated successfully, now map
ret = MM_MapHWPages(phys, Pages);
- *PhysAddr = phys;
+ if(PhysAddr)
+ *PhysAddr = phys;
// MapHWPages references the pages, so deref them back down to 1
for(;Pages--;phys+=0x1000)
MM_DerefPhys(phys);
{
const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
tVAddr ret = MM_TMPMAP_BASE;
- int i;
- for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
+ for( int i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
{
tPAddr *ent;
if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
return 0;
}
+void *MM_MapTempFromProc(tProcess *Process, const void *VAddr)
+{
+ // Get paddr
+ tPAddr paddr = MM_GetPageFromAS(Process, VAddr);
+ if( paddr == 0 )
+ return NULL;
+ return MM_MapTemp(paddr);
+}
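+// The reference taken by MM_GetPageFromAS is dropped when the caller
+// passes the temp mapping to MM_FreeTemp (via MM_Deallocate/MM_DerefPhys).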
+
void MM_FreeTemp(void *Ptr)
{
- MM_Deallocate((tVAddr)Ptr);
- return ;
+ MM_Deallocate(Ptr);
}
{
tPAddr ret;
int i;
- tVAddr kstackbase;
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
if(!ret) return 0;
// #2 Alter the fractal pointer
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ret | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING(ret);
// #3 Set Copy-On-Write to all user pages
if( Threads_GetPID() != 0 && !bNoUserCopy )
// #6 Create kernel stack
// tThread->KernelStack is the top
// There is 1 guard page below the stack
- kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+ tPage *kstackbase = (void*)( Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE );
// Clone stack
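+	// Unhook the clone's kernel stack PML4 slot so fresh pages can be mapped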
TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
- for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
+ for( i = 1; i < KERNEL_STACK_SIZE/PAGE_SIZE; i ++ )
{
tPAddr phys = MM_AllocPhys();
void *tmpmapping;
- MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+ MM_MapEx(kstackbase + i, phys, 1, 0);
tmpmapping = MM_MapTemp(phys);
- if( MM_GetPhysAddr( (void*)(kstackbase+i*0x1000) ) )
- memcpy(tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+		// Copy this page if the current stack has it mapped; the current
+		// stack may be shorter than the new one, in which case zero-fill
+ if( MM_GetPhysAddr( kstackbase + i ) )
+ memcpy(tmpmapping, kstackbase + i, 0x1000);
else
memset(tmpmapping, 0, 0x1000);
// if( i == 0xF )
// MAGIC_BREAK();
// #7 Return
- TMPCR3() = 0;
- INVLPG_ALL();
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
// Log("MM_Clone: RETURN %P", ret);
return ret;
}
int i;
// #1 Set temp fractal to PID0
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING( ((tPAddr)gInitialPML4 - KERNEL_BASE) );
	// #2 Scan for a free stack address < 2^47
for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
if( !(*ptr & 1) ) break;
}
if( ret >= (1ULL << 47) ) {
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return 0;
}
Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
return 0;
}
- MM_MapEx(ret + i*0x1000, phys, 1, 0);
- MM_SetFlags(ret + i*0x1000, MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
+ MM_MapEx( (void*)(ret + i*0x1000), phys, 1, 0);
+		// XXX: MM_SetFlags acts on the current address space, not the new one
+ MM_SetFlags( (void*)(ret + i*0x1000), MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
}
// Copy data
MM_FreeTemp(tmp_addr);
}
- TMPCR3() = 0;
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return ret + i*0x1000;
}
//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
{
- if( !MM_Allocate(base+i) )
+ if( !MM_Allocate( (void*)(base+i) ) )
{
Log_Warning("MM", "MM_NewKStack - Allocation failed");
for( i -= 0x1000; i; i -= 0x1000)
- MM_Deallocate(base+i);
+ MM_Deallocate((void*)(base+i));
return 0;
}
}