/*
 * TLB shootdown helpers (x86-64).
 * INVLPG_ALL: reloading CR3 flushes all non-global TLB entries.
 * INVLPG_GLOBAL: toggling CR4 bit 7 (PGE) off and back on flushes global
 * entries as well.
 * Both sequences trash a scratch register and must be ordered against
 * surrounding memory accesses; the original basic-asm form clobbered
 * %rax/%eax without telling the compiler, which is undefined behaviour if
 * the compiler had a live value there. Extended asm with explicit
 * "rax"/"memory" clobbers makes the cost and side effects visible.
 */
#define INVLPG_ALL()	__asm__ __volatile__ ("mov %%cr3, %%rax\n\tmov %%rax, %%cr3" ::: "rax", "memory")
#define INVLPG_GLOBAL()	__asm__ __volatile__ ("mov %%cr4, %%rax\n\txorl $0x80, %%eax\n\tmov %%rax, %%cr4\n\txorl $0x80, %%eax\n\tmov %%rax, %%cr4" ::: "rax", "memory")
// TODO: INVLPG_ALL is expensive
/**
 * \brief Claim the shared temporary-fractal slot and point it at another
 *        address space.
 * \param cr3	Physical address of the target top-level page table
 *
 * Spins (via __AtomicTestSetLoop) until the TMPCR3() slot is free, then
 * installs \a cr3 ORed with 3 (Present | Writable) and flushes the whole
 * TLB so the fractal window sees the new tables.
 * NOTE(review): interrupts are disabled unconditionally here and re-enabled
 * unconditionally in REL_TEMP_MAPPING(); this assumes callers always enter
 * with interrupts enabled - TODO confirm no caller holds IF=0.
 */
#define GET_TEMP_MAPPING(cr3) do { \
	__ASM__("cli"); \
	__AtomicTestSetLoop( (Uint *)&TMPCR3(), (cr3) | 3 ); \
	INVLPG_ALL(); \
} while(0)
/**
 * \brief Release the slot claimed by GET_TEMP_MAPPING().
 *
 * Zeroing TMPCR3() lets the next claimant's test-and-set succeed; no TLB
 * flush happens here (the next GET_TEMP_MAPPING() flushes before use).
 */
#define REL_TEMP_MAPPING() do { \
	TMPCR3() = 0; \
	__ASM__("sti"); \
} while(0)
+
// === CONSTS ===
//tPAddr * const gaPageTable = MM_FRACTAL_BASE;
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
//void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr);
int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
int MM_GetPageEntry(tVAddr Addr, tPAddr *Phys, Uint *Flags);
// === GLOBALS ===
-tMutex glMM_TempFractalLock;
tShortSpinlock glMM_ZeroPage;
tPAddr gMM_ZeroPage;
// Print Stack Backtrace
Error_Backtrace(Regs->RIP, Regs->RBP);
- MM_DumpTables(0, -1);
+ //MM_DumpTables(0, -1);
return 1;
}
return (*ptr & PADDR_MASK) | (Addr & 0xFFF);
}
+/**
+ * \brief Get the address of a page from another addres space
+ * \return Refenced physical address (or 0 on error)
+ */
+tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr)
+{
+ GET_TEMP_MAPPING(Process->MemState.CR3);
+ tPAddr ret = 0;
+ tPAddr *ptr;
+ if(MM_GetPageEntryPtr((tVAddr)Addr, 1,0,0, &ptr) == 0) // Temp, NoAlloc, NotLarge
+ {
+ if( *ptr & 1 )
+ {
+ ret = (*ptr & ~0xFFF) | ((tVAddr)Addr & 0xFFF);
+ MM_RefPhys( ret );
+ }
+ }
+ REL_TEMP_MAPPING();
+ return ret;
+}
+
/**
* \brief Sets the flags on a page
*/
void *ret;
// Sanity Check
- if(MaxBits < 12 || !PhysAddr) return 0;
+ ASSERTCR(MaxBits, >=, 12, NULL);
// Fast Allocate
if(Pages == 1 && MaxBits >= PHYS_BITS)
{
phys = MM_AllocPhys();
- *PhysAddr = phys;
ret = MM_MapHWPages(phys, 1);
MM_DerefPhys(phys);
+ if(PhysAddr)
+ *PhysAddr = phys;
return ret;
}
// Allocated successfully, now map
ret = MM_MapHWPages(phys, Pages);
- *PhysAddr = phys;
+ if(PhysAddr)
+ *PhysAddr = phys;
// MapHWPages references the pages, so deref them back down to 1
for(;Pages--;phys+=0x1000)
MM_DerefPhys(phys);
{
const int max_slots = (MM_TMPMAP_END - MM_TMPMAP_BASE) / PAGE_SIZE;
tVAddr ret = MM_TMPMAP_BASE;
- int i;
- for( i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
+ for( int i = 0; i < max_slots; i ++, ret += PAGE_SIZE )
{
tPAddr *ent;
if( MM_GetPageEntryPtr( ret, 0, 1, 0, &ent) < 0 ) {
return 0;
}
+void *MM_MapTempFromProc(tProcess *Process, const void *VAddr)
+{
+ // Get paddr
+ tPAddr paddr = MM_GetPageFromAS(Process, VAddr);
+ if( paddr == 0 )
+ return NULL;
+ return MM_MapTemp(paddr);
+}
+
void MM_FreeTemp(void *Ptr)
{
MM_Deallocate(Ptr);
if(!ret) return 0;
// #2 Alter the fractal pointer
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ret | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING(ret);
// #3 Set Copy-On-Write to all user pages
if( Threads_GetPID() != 0 && !bNoUserCopy )
// MAGIC_BREAK();
// #7 Return
- TMPCR3() = 0;
- INVLPG_ALL();
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
// Log("MM_Clone: RETURN %P", ret);
return ret;
}
int i;
// #1 Set temp fractal to PID0
- Mutex_Acquire(&glMM_TempFractalLock);
- TMPCR3() = ((tPAddr)gInitialPML4 - KERNEL_BASE) | 3;
- INVLPG_ALL();
+ GET_TEMP_MAPPING( ((tPAddr)gInitialPML4 - KERNEL_BASE) );
// #2 Scan for a free stack addresss < 2^47
for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
if( !(*ptr & 1) ) break;
}
if( ret >= (1ULL << 47) ) {
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return 0;
}
MM_FreeTemp(tmp_addr);
}
- TMPCR3() = 0;
- Mutex_Release(&glMM_TempFractalLock);
+ REL_TEMP_MAPPING();
return ret + i*0x1000;
}