#include <mm_phys.h>
#include <proc.h>
#include <hal_proc.h>
+#include <arch_int.h>
#define TAB 22
-#define KERNEL_STACKS 0xF0000000
-#define KERNEL_STACK_SIZE 0x00008000
-#define KERNEL_STACKS_END 0xFC000000
#define WORKER_STACKS 0x00100000 // Thread0 Only!
-#define WORKER_STACK_SIZE KERNEL_STACK_SIZE
+#define WORKER_STACK_SIZE MM_KERNEL_STACK_SIZE
#define WORKER_STACKS_END 0xB0000000
#define NUM_WORKER_STACKS ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
#define INVLPG(addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
+// Acquire the single global temporary-mapping slot (*gpTmpCR3), spinning
+// until it is free, then install `cr3` with Present|Writable (|3) set.
+// Interrupts are disabled for the whole time the slot is held.
+// NOTE(review): REL_TEMP_MAPPING does an unconditional "sti", so this
+// pair assumes IRQs were enabled on entry -- confirm no caller uses it
+// from an IRQs-off context.
+#define GET_TEMP_MAPPING(cr3) do { \
+	__ASM__("cli"); \
+	__AtomicTestSetLoop( (Uint *)gpTmpCR3, (cr3) | 3 ); \
+} while(0)
+// Release the temporary-mapping slot and re-enable interrupts
+#define REL_TEMP_MAPPING() do { \
+	*gpTmpCR3 = 0; \
+	__ASM__("sti"); \
+} while(0)
+
typedef Uint32 tTabEnt;
// === IMPORTS ===
{
if( gaPageDir[ i ] ) continue;
// Skip stack tables, they are process unique
- if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
+ if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) {
gaPageDir[ i ] = 0;
continue;
}
for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
}
+
+ *gpTmpCR3 = 0;
}
/**
INVLPG( Addr & ~0xFFF );
return;
}
-
- __asm__ __volatile__ ("pushf; andw $0xFEFF, 0(%esp); popf");
+
+ // Disable instruction tracing
+ __ASM__("pushf; andw $0xFEFF, 0(%esp); popf");
Proc_GetCurThread()->bInstrTrace = 0;
// If it was a user, tell the thread handler
(ErrorCode&16?" (Instruction Fetch)":"")
);
Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
- __asm__ __volatile__ ("sti"); // Restart IRQs
+ __ASM__("sti"); // Restart IRQs
#if 1
Error_Backtrace(Regs->eip, Regs->ebp);
#endif
);
}
- Log("Code at %p accessed %p", Regs->eip, Addr);
+ Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr);
// Print Stack Backtrace
Error_Backtrace(Regs->eip, Regs->ebp);
-
+
+ #if 0
Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
if( gaPageDir[Addr>>22] & PF_PRESENT )
Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
-
+ #endif
//MM_DumpTables(0, -1);
// Register Dump
{
tPAddr paddr;
//ENTER("xVAddr", VAddr);
- //__asm__ __volatile__ ("xchg %bx,%bx");
+ //__ASM__("xchg %bx,%bx");
// Check if the directory is mapped
if( gaPageDir[ VAddr >> 22 ] == 0 )
{
*/
void MM_SetCR3(Uint CR3)
{
-	__asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
+	// Load the page-directory base register; as a side effect the CPU
+	// flushes all non-global TLB entries for the old address space
+	__ASM__("mov %0, %%cr3"::"r"(CR3));
}
/**
//ENTER("xVAddr xPAddr", VAddr, PAddr);
// Sanity check
if( PAddr & 0xFFF || VAddr & 0xFFF ) {
- Warning("MM_Map - Physical or Virtual Addresses are not aligned");
+ Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned");
//LEAVE('i', 0);
return 0;
}
INVLPG( gaPageDir );
}
+/**
+ * \brief Deallocate an address space
+ * \param CR3 Physical address of the page directory to tear down
+ * Dereferences all user pages and the per-process kernel-stack pages,
+ * then frees the page tables and the directory itself. Must not be
+ * called on the currently active address space.
+ */
+void MM_ClearSpace(Uint32 CR3)
+{
+	 int	i, j;
+	
+	// Refuse to pull the rug out from under ourselves
+	if(CR3 == (*gpPageCR3 & ~0xFFF)) {
+		Log_Error("MMVirt", "Can't clear current address space");
+		return ;
+	}
+
+	// If the directory is shared (e.g. forked threads), just drop our
+	// reference -- the last holder will do the actual cleanup
+	if( MM_GetRefCount(CR3) > 1 ) {
+		MM_DerefPhys(CR3);
+		Log_Log("MMVirt", "CR3 %P is still referenced, not cleaning (but dereferenced)", CR3);
+		return ;
+	}
+
+	Log_Debug("MMVirt", "Clearing out address space 0x%x from 0x%x", CR3, *gpPageCR3);
+	
+	// Map the victim directory through the temporary fractal window
+	// (disables IRQs until REL_TEMP_MAPPING)
+	GET_TEMP_MAPPING(CR3);
+	INVLPG( gaTmpDir );
+
+	for( i = 0; i < 1024; i ++ )
+	{
+		Uint32	*table = &gaTmpTable[i*1024];
+		if( !(gaTmpDir[i] & PF_PRESENT) )
+			continue ;
+
+		INVLPG( table );
+
+		// Dereference individual pages only in user space (< 768, i.e.
+		// below the 3GiB kernel split) and in the per-process kernel
+		// stack range; all other kernel tables are shared and their
+		// pages must not be freed here
+		if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) )
+		{
+			for( j = 0; j < 1024; j ++ )
+			{
+				// bit 0 = PF_PRESENT
+				if( !(table[j] & 1) )
+					continue;
+				MM_DerefPhys( table[j] & ~0xFFF );
+			}
+		}
+
+		// Free the page table itself, except the fractal self-mapping
+		// entry (which is the directory itself, freed below)
+		if( i != (PAGE_TABLE_ADDR >> 22) )
+		{
+			MM_DerefPhys( gaTmpDir[i] & ~0xFFF );
+		}
+	}
+
+
+	// Finally release the directory page
+	MM_DerefPhys( CR3 );
+
+	// Drop the temp mapping (re-enables interrupts)
+	REL_TEMP_MAPPING();
+}
+
/**
* \fn tPAddr MM_Clone(void)
* \brief Clone the current address space
*/
-tPAddr MM_Clone(void)
+tPAddr MM_Clone(int bNoUserCopy)
{
Uint i, j;
- tVAddr ret;
+ tPAddr ret;
Uint page = 0;
- tVAddr kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+ tVAddr kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE;
void *tmp;
- Mutex_Acquire( &glTempFractal );
-
// Create Directory Table
- *gpTmpCR3 = MM_AllocPhys() | 3;
- if( *gpTmpCR3 == 3 ) {
- *gpTmpCR3 = 0;
+ ret = MM_AllocPhys();
+ if( ret == 0 ) {
return 0;
}
+
+ // Map
+ GET_TEMP_MAPPING( ret );
INVLPG( gaTmpDir );
- //LOG("Allocated Directory (%x)", *gpTmpCR3);
memsetd( gaTmpDir, 0, 1024 );
- if( Threads_GetPID() != 0 )
+ if( Threads_GetPID() != 0 && !bNoUserCopy )
{
// Copy Tables
for( i = 0; i < 768; i ++)
gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
continue;
}
+ if( i == (TMP_TABLE_ADDR >> 22) ) {
+ gaTmpDir[ TMP_TABLE_ADDR >> 22 ] = 0;
+ continue ;
+ }
if( gaPageDir[i] == 0 ) {
gaTmpDir[i] = 0;
}
// Allocate kernel stack
- for(i = KERNEL_STACKS >> 22;
- i < KERNEL_STACKS_END >> 22;
- i ++ )
+ for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ )
{
// Check if directory is allocated
if( (gaPageDir[i] & 1) == 0 ) {
}
// We don't care about other kernel stacks
- if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
+ if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) {
gaTmpTable[i*1024+j] = 0;
continue;
}
}
}
- ret = *gpTmpCR3 & ~0xFFF;
- Mutex_Release( &glTempFractal );
+ REL_TEMP_MAPPING();
//LEAVE('x', ret);
return ret;
{
tVAddr base;
Uint i;
- for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
+ for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
{
// Check if space is free
if(MM_GetPhysAddr(base) != 0) continue;
// Allocate
- //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
- for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
+ //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
+ for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
{
if( MM_Allocate(base+i) == 0 )
{
// On error, print a warning and return error
Warning("MM_NewKStack - Out of memory");
// - Clean up
- //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
+ //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
// MM_Deallocate(base+i);
return 0;
}
}
// Success
-// Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
- return base+KERNEL_STACK_SIZE;
+// Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE);
+ return base+MM_KERNEL_STACK_SIZE;
}
// No stacks left
Log_Warning("MMVirt", "MM_NewKStack - No address space left");
base = WORKER_STACKS + base * WORKER_STACK_SIZE;
//Log(" MM_NewWorkerStack: base = 0x%x", base);
- // Acquire the lock for the temp fractal mappings
- Mutex_Acquire(&glTempFractal);
-
// Set the temp fractals to TID0's address space
- *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
- //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
+ GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) );
INVLPG( gaTmpDir );
-
// Check if the directory is mapped (we are assuming that the stacks
// will fit neatly in a directory)
//Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
page = MM_AllocPhys();
gaTmpTable[ (base + addr) >> 12 ] = page | 3;
}
- *gpTmpCR3 = 0;
- // Release the temp mapping lock
- Mutex_Release(&glTempFractal);
+
+ // Release temporary fractal
+ REL_TEMP_MAPPING();
// NOTE: Max of 1 page
// `page` is the last allocated page from the previious for loop