X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;ds=sidebyside;f=Kernel%2Farch%2Fx86%2Fmm_virt.c;h=bd7b1dff66adb4474783db55a1d589fcf7d80857;hb=a2495c6ea4f4cab16b5d339ae511428e92e89e73;hp=970e957541562dec39c14c37c08a14b9e0a096e6;hpb=17aac974ab83a3521f2b49b8de33ae05a00fbe07;p=tpg%2Facess2.git diff --git a/Kernel/arch/x86/mm_virt.c b/Kernel/arch/x86/mm_virt.c index 970e9575..bd7b1dff 100644 --- a/Kernel/arch/x86/mm_virt.c +++ b/Kernel/arch/x86/mm_virt.c @@ -15,14 +15,13 @@ #include #include #include +#include +#include #define TAB 22 -#define KERNEL_STACKS 0xF0000000 -#define KERNEL_STACK_SIZE 0x00008000 -#define KERNEL_STACKS_END 0xFC000000 #define WORKER_STACKS 0x00100000 // Thread0 Only! -#define WORKER_STACK_SIZE KERNEL_STACK_SIZE +#define WORKER_STACK_SIZE MM_KERNEL_STACK_SIZE #define WORKER_STACKS_END 0xB0000000 #define NUM_WORKER_STACKS ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE) @@ -56,10 +55,19 @@ #define INVLPG(addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(addr)) +#define GET_TEMP_MAPPING(cr3) do { \ + __ASM__("cli"); \ + __AtomicTestSetLoop( (Uint *)gpTmpCR3, cr3 | 3 ); \ +} while(0) +#define REL_TEMP_MAPPING() do { \ + *gpTmpCR3 = 0; \ + __ASM__("sti"); \ +} while(0) + typedef Uint32 tTabEnt; // === IMPORTS === -extern void _UsertextEnd, _UsertextBase; +extern char _UsertextEnd[], _UsertextBase[]; extern Uint32 gaInitPageDir[1024]; extern Uint32 gaInitPageTable[1024]; extern void Threads_SegFault(tVAddr Addr); @@ -69,8 +77,8 @@ extern void Error_Backtrace(Uint eip, Uint ebp); void MM_PreinitVirtual(void); void MM_InstallVirtual(void); void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs); -void MM_DumpTables(tVAddr Start, tVAddr End); -tVAddr MM_ClearUser(void); +//void MM_DumpTables(tVAddr Start, tVAddr End); +//void MM_ClearUser(void); tPAddr MM_DuplicatePage(tVAddr VAddr); // === GLOBALS === @@ -124,7 +132,7 @@ void MM_InstallVirtual(void) { if( gaPageDir[ i ] ) continue; // Skip stack tables, they are process unique - if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) { + if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) { gaPageDir[ i ] = 0; continue; } @@ -138,6 +146,8 @@ void MM_InstallVirtual(void) for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) { MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL ); } + + *gpTmpCR3 = 0; } /** @@ -175,13 +185,14 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs) gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE; } - Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]); +// Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]); INVLPG( Addr & ~0xFFF ); return; } - - __asm__ __volatile__ ("pushf; andw $0xFEFF, 0(%esp); popf"); + + // Disable instruction tracing + __ASM__("pushf; andw $0xFEFF, 0(%esp); popf"); Proc_GetCurThread()->bInstrTrace = 0; // If it was a user, tell the thread handler @@ -192,7 +203,7 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs) (ErrorCode&16?" 
(Instruction Fetch)":"") ); Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr); - __asm__ __volatile__ ("sti"); // Restart IRQs + __ASM__("sti"); // Restart IRQs #if 1 Error_Backtrace(Regs->eip, Regs->ebp); #endif @@ -214,14 +225,15 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs) ); } - Log("Code at %p accessed %p", Regs->eip, Addr); + Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr); // Print Stack Backtrace Error_Backtrace(Regs->eip, Regs->ebp); - + + #if 0 Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]); if( gaPageDir[Addr>>22] & PF_PRESENT ) Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]); - + #endif //MM_DumpTables(0, -1); // Register Dump @@ -330,7 +342,7 @@ tPAddr MM_Allocate(tVAddr VAddr) { tPAddr paddr; //ENTER("xVAddr", VAddr); - //__asm__ __volatile__ ("xchg %bx,%bx"); + //__ASM__("xchg %bx,%bx"); // Check if the directory is mapped if( gaPageDir[ VAddr >> 22 ] == 0 ) { @@ -415,7 +427,7 @@ tPAddr MM_GetPhysAddr(tVAddr Addr) */ void MM_SetCR3(Uint CR3) { - __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3)); + __ASM__("mov %0, %%cr3"::"r"(CR3)); } /** @@ -427,7 +439,7 @@ int MM_Map(tVAddr VAddr, tPAddr PAddr) //ENTER("xVAddr xPAddr", VAddr, PAddr); // Sanity check if( PAddr & 0xFFF || VAddr & 0xFFF ) { - Warning("MM_Map - Physical or Virtual Addresses are not aligned"); + Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned"); //LEAVE('i', 0); return 0; } @@ -475,10 +487,9 @@ int MM_Map(tVAddr VAddr, tPAddr PAddr) } /** - * \fn tVAddr MM_ClearUser() * \brief Clear user's address space */ -tVAddr MM_ClearUser(void) +void MM_ClearUser(void) { Uint i, j; @@ -504,35 +515,85 @@ tVAddr MM_ClearUser(void) INVLPG( &gaPageTable[i*1024] ); } INVLPG( gaPageDir ); +} + +/** + * \brief Deallocate an address space + */ +void MM_ClearSpace(Uint32 CR3) +{ + int i, j; + + if(CR3 == (*gpPageCR3 & ~0xFFF)) { + Log_Error("MMVirt", "Can't clear current address space"); + return ; + } + + if( MM_GetRefCount(CR3) > 1 ) { + MM_DerefPhys(CR3); + Log_Log("MMVirt", "CR3 %P is still referenced, not cleaning (but dereferenced)", CR3); + return ; + } + + Log_Debug("MMVirt", "Clearing out address space 0x%x from 0x%x", CR3, *gpPageCR3); - return *gpPageCR3; + GET_TEMP_MAPPING(CR3); + INVLPG( gaTmpDir ); + + for( i = 0; i < 1024; i ++ ) + { + Uint32 *table = &gaTmpTable[i*1024]; + if( !(gaTmpDir[i] & PF_PRESENT) ) + continue ; + + INVLPG( table ); + + if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) ) + { + for( j = 0; j < 1024; j ++ ) + { + if( !(table[j] & 1) ) + continue; + MM_DerefPhys( table[j] & ~0xFFF ); + } + } + + if( i != (PAGE_TABLE_ADDR >> 22) ) + { + MM_DerefPhys( gaTmpDir[i] & ~0xFFF ); + } + } + + + MM_DerefPhys( CR3 ); + + REL_TEMP_MAPPING(); } /** * \fn tPAddr MM_Clone(void) * \brief Clone the current address space */ -tPAddr MM_Clone(void) +tPAddr MM_Clone(int bNoUserCopy) { Uint i, j; - tVAddr ret; + tPAddr ret; Uint page = 0; - tVAddr kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE; + tVAddr kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE; void *tmp; - Mutex_Acquire( &glTempFractal ); - // Create Directory Table - *gpTmpCR3 = MM_AllocPhys() | 3; - if( *gpTmpCR3 == 3 ) { - *gpTmpCR3 = 0; + ret = MM_AllocPhys(); + if( ret == 0 ) { return 0; } + + // Map + GET_TEMP_MAPPING( ret ); INVLPG( gaTmpDir ); - //LOG("Allocated Directory (%x)", *gpTmpCR3); memsetd( gaTmpDir, 0, 1024 ); - if( Threads_GetPID() != 0 ) 
+ if( Threads_GetPID() != 0 && !bNoUserCopy ) { // Copy Tables for( i = 0; i < 768; i ++) @@ -577,6 +638,10 @@ tPAddr MM_Clone(void) gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3; continue; } + if( i == (TMP_TABLE_ADDR >> 22) ) { + gaTmpDir[ TMP_TABLE_ADDR >> 22 ] = 0; + continue ; + } if( gaPageDir[i] == 0 ) { gaTmpDir[i] = 0; @@ -589,9 +654,7 @@ tPAddr MM_Clone(void) } // Allocate kernel stack - for(i = KERNEL_STACKS >> 22; - i < KERNEL_STACKS_END >> 22; - i ++ ) + for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ ) { // Check if directory is allocated if( (gaPageDir[i] & 1) == 0 ) { @@ -618,7 +681,7 @@ tPAddr MM_Clone(void) } // We don't care about other kernel stacks - if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) { + if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) { gaTmpTable[i*1024+j] = 0; continue; } @@ -634,8 +697,7 @@ tPAddr MM_Clone(void) } } - ret = *gpTmpCR3 & ~0xFFF; - Mutex_Release( &glTempFractal ); + REL_TEMP_MAPPING(); //LEAVE('x', ret); return ret; @@ -649,30 +711,30 @@ tVAddr MM_NewKStack(void) { tVAddr base; Uint i; - for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE) + for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE) { // Check if space is free if(MM_GetPhysAddr(base) != 0) continue; // Allocate - //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; ) - for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 ) + //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; ) + for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 ) { if( MM_Allocate(base+i) == 0 ) { // On error, print a warning and return error Warning("MM_NewKStack - Out of memory"); // - Clean up - //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 ) + //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 ) // MM_Deallocate(base+i); return 0; } } // Success - Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE); - return base+KERNEL_STACK_SIZE; +// Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE); + return base+MM_KERNEL_STACK_SIZE; } // No stacks left - Warning("MM_NewKStack - No address space left"); + Log_Warning("MMVirt", "MM_NewKStack - No address space left"); return 0; } @@ -715,15 +777,10 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) base = WORKER_STACKS + base * WORKER_STACK_SIZE; //Log(" MM_NewWorkerStack: base = 0x%x", base); - // Acquire the lock for the temp fractal mappings - Mutex_Acquire(&glTempFractal); - // Set the temp fractals to TID0's address space - *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3; - //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3); + GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) ); INVLPG( gaTmpDir ); - // Check if the directory is mapped (we are assuming that the stacks // will fit neatly in a directory) //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]); @@ -738,9 +795,9 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) page = MM_AllocPhys(); gaTmpTable[ (base + addr) >> 12 ] = page | 3; } - *gpTmpCR3 = 0; - // Release the temp mapping lock - Mutex_Release(&glTempFractal); + + // Release temporary fractal + REL_TEMP_MAPPING(); // NOTE: Max of 1 page // `page` is the last allocated page from the previious for loop @@ -829,6 +886,45 @@ Uint MM_GetFlags(tVAddr VAddr) return ret; } +/** + * \brief Check if the provided buffer is valid + * \return Boolean valid + */ +int MM_IsValidBuffer(tVAddr Addr, size_t Size) +{ + int bIsUser; + int dir, tab; + + 
+ Size += Addr & (PAGE_SIZE-1);
+ Addr &= ~(PAGE_SIZE-1);
+
+ dir = Addr >> 22;
+ tab = Addr >> 12;
+
+// Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
+
+ if( !(gaPageDir[dir] & 1) ) return 0;
+ if( !(gaPageTable[tab] & 1) ) return 0;
+
+ bIsUser = !!(gaPageTable[tab] & PF_USER);
+
+ while( Size >= PAGE_SIZE )
+ {
+ if( (tab & 1023) == 0 )
+ {
+ dir ++;
+ if( !(gaPageDir[dir] & 1) ) return 0;
+ }
+
+ if( !(gaPageTable[tab] & 1) ) return 0;
+ if( bIsUser && !(gaPageTable[tab] & PF_USER) ) return 0;
+
+ tab ++;
+ Size -= PAGE_SIZE;
+ }
+ return 1;
+}
+
 /**
  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
  * \brief Duplicates a virtual page to a physical one