X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=KernelLand%2FKernel%2Farch%2Fx86%2Fmm_virt.c;h=01843d9c92996bb280c9957d9218b92862e8a557;hb=7e9bbefbdcbfdba27eb6cdacae0811f428483892;hp=d15c3d72773e9eda4ddb5abf4519e1995be8e9be;hpb=9dccbc6f16485ea62caa8c4153f6f878da8cbb0d;p=tpg%2Facess2.git

diff --git a/KernelLand/Kernel/arch/x86/mm_virt.c b/KernelLand/Kernel/arch/x86/mm_virt.c
index d15c3d72..01843d9c 100644
--- a/KernelLand/Kernel/arch/x86/mm_virt.c
+++ b/KernelLand/Kernel/arch/x86/mm_virt.c
@@ -17,41 +17,28 @@
 #include 
 #include 
 #include 
+#include 
+
+#include "include/vmem_layout.h"
+
+#define TRACE_MAPS	0
+
+#define KWATCH_BUCKETS	512
 
 #define TAB	22
 
-#define WORKER_STACKS		0x00100000	// Thread0 Only!
-#define WORKER_STACK_SIZE	MM_KERNEL_STACK_SIZE
-#define WORKER_STACKS_END	0xB0000000
-#define NUM_WORKER_STACKS	((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
-
-#define PAE_PAGE_TABLE_ADDR	0xFC000000	// 16 MiB
-#define PAE_PAGE_DIR_ADDR	0xFCFC0000	// 16 KiB
-#define PAE_PAGE_PDPT_ADDR	0xFCFC3F00	// 32 bytes
-#define PAE_TMP_PDPT_ADDR	0xFCFC3F20	// 32 bytes
-#define PAE_TMP_DIR_ADDR	0xFCFE0000	// 16 KiB
-#define PAE_TMP_TABLE_ADDR	0xFD000000	// 16 MiB
-
-#define PAGE_TABLE_ADDR	0xFC000000
-#define PAGE_DIR_ADDR	0xFC3F0000
-#define PAGE_CR3_ADDR	0xFC3F0FC0
-#define TMP_CR3_ADDR	0xFC3F0FC4	// Part of core instead of temp
-#define TMP_DIR_ADDR	0xFC3F1000	// Same
-#define TMP_TABLE_ADDR	0xFC400000
-
-#define HW_MAP_ADDR	0xFE000000
-#define HW_MAP_MAX	0xFFEF0000
-#define NUM_HW_PAGES	((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
-#define TEMP_MAP_ADDR	0xFFEF0000	// Allows 16 "temp" pages
-#define NUM_TEMP_PAGES	16
-#define LAST_BLOCK_ADDR	0xFFFF0000	// Free space for kernel provided user code/ *(-1) protection
-
-#define PF_PRESENT	0x1
-#define PF_WRITE	0x2
-#define PF_USER	0x4
-#define PF_GLOBAL	0x80
-#define PF_COW	0x200
-#define PF_NOPAGE	0x400
+#define PF_PRESENT	0x01
+#define PF_WRITE	0x02
+#define PF_USER 	0x04
+#define PF_PAGEWT	0x08	// Page-level write through
+#define PF_PAGECD	0x10	// Page-level cache disable
+#define PF_ACCESSED	0x20
+#define PF_DIRTY	0x40
+#define PF_PAT  	0x80	// Page Attribute Table bit (4 KiB PTE) / Page Size bit (PDE)
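+// Bit 8 (PF_GLOBAL) below is the hardware Global flag; bits 9-11 are ignored
+// by the MMU, so the kernel reuses them for copy-on-write, page-out control
+// and watchpoint tracking.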
+#define PF_GLOBAL 0x100 // Global Page +#define PF_COW 0x200 // [ 9] Ignored - Copy-on-write +#define PF_NOPAGE 0x400 // [10] Ignored - Disable page-out +#define PF_WATCHED 0x800 // [11] Ignored - Watchpointing enabled #define INVLPG(addr) __asm__ __volatile__ ("invlpg (%0)"::"r"(addr)) @@ -67,17 +54,27 @@ typedef Uint32 tTabEnt; // === IMPORTS === -extern char _UsertextEnd[], _UsertextBase[]; +extern tPage _UsertextEnd; +extern tPage _UsertextBase; +extern tPage gKernelEnd; // defined as page aligned extern Uint32 gaInitPageDir[1024]; extern Uint32 gaInitPageTable[1024]; extern void Threads_SegFault(tVAddr Addr); -extern void Error_Backtrace(Uint eip, Uint ebp); + +typedef struct sWatchpoint +{ + struct sWatchpoint *Next; + Uint PageNum; + Uint8 Bitmap[PAGE_SIZE/4/8]; +} tWatchpoint; // === PROTOTYPES === void MM_PreinitVirtual(void); void MM_InstallVirtual(void); void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs); +void MM_DumpTables_Print(tVAddr Start, Uint32 Orig, size_t Size, void *Node); //void MM_DumpTables(tVAddr Start, tVAddr End); +tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr); //void MM_ClearUser(void); tPAddr MM_DuplicatePage(tVAddr VAddr); @@ -97,6 +94,7 @@ tPAddr MM_DuplicatePage(tVAddr VAddr); #define gaPAE_TmpPDPT ((tTabEnt*)PAE_TMP_PDPT_ADDR) int gbUsePAE = 0; tMutex glTempMappings; +tSemaphore gTempMappingsSem; tMutex glTempFractal; Uint32 gWorkerStacks[(NUM_WORKER_STACKS+31)/32]; int giLastUsedWorker = 0; @@ -107,6 +105,10 @@ struct sPageInfo { int Length; int Flags; } *gaMappedRegions; // sizeof = 24 bytes +// - Zero page +tShortSpinlock glMM_ZeroPage; +tPAddr giMM_ZeroPage; +tWatchpoint *gapKernelWatchpoints[KWATCH_BUCKETS]; // === CODE === /** @@ -117,6 +119,8 @@ void MM_PreinitVirtual(void) { gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3; INVLPG( PAGE_TABLE_ADDR ); + + Semaphore_Init(&gTempMappingsSem, NUM_TEMP_PAGES, NUM_TEMP_PAGES, "MMVirt", "Temp Mappings"); } /** @@ -125,12 +129,15 @@ void MM_PreinitVirtual(void) */ void MM_InstallVirtual(void) { - int i; - + // Don't bother referencing, as it'a in the kernel area + //MM_RefPhys( gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] ); // --- Pre-Allocate kernel tables - for( i = KERNEL_BASE>>22; i < 1024; i ++ ) + for( int i = KERNEL_BASE>>22; i < 1024; i ++ ) { - if( gaPageDir[ i ] ) continue; + if( gaPageDir[ i ] ) { + // MM_RefPhys( gaPageDir[ i ] & ~0xFFF ); + continue; + } // Skip stack tables, they are process unique if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) { gaPageDir[ i ] = 0; @@ -143,10 +150,23 @@ void MM_InstallVirtual(void) } // Unset kernel on the User Text pages - for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) { - MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL ); + ASSERT( ((tVAddr)&_UsertextBase & (PAGE_SIZE-1)) == 0 ); + //ASSERT( ((tVAddr)&_UsertextEnd & (PAGE_SIZE-1)) == 0 ); + for( tPage *page = &_UsertextBase; page < &_UsertextEnd; page ++ ) + { + MM_SetFlags( page, 0, MM_PFLAG_KERNEL ); } - + + // Unmap the area between end of kernel image and the heap + // DISABLED: Assumptions in main.c + #if 0 + for( tPage *page = &gKernelEnd; page < (tPage*)(KERNEL_BASE+4*1024*1024); page ++ ) + { + gaPageTable[ (tVAddr)page / PAGE_SIZE ] = 0; + //MM_Deallocate(page); + } + #endif + *gpTmpCR3 = 0; } @@ -164,26 +184,27 @@ void MM_FinishVirtualInit(void) */ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs) { + Uint32 *pde = &gaPageDir[Addr>>22]; + Uint32 *pte = 
&gaPageTable[Addr>>12];
 	//ENTER("xAddr bErrorCode", Addr, ErrorCode);
 	
 	// -- Check for COW --
-	if( gaPageDir [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
-	 && gaPageTable[Addr>>12] & PF_COW )
+	if( (*pde & PF_PRESENT) && (*pte & PF_PRESENT) && (*pte & PF_COW) )
 	{
 		tPAddr	paddr;
 		__asm__ __volatile__ ("sti");
-		if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
+		if( MM_GetRefCount( *pte & ~0xFFF ) == 1 )
 		{
-			gaPageTable[Addr>>12] &= ~PF_COW;
-			gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
+			*pte &= ~PF_COW;
+			*pte |= PF_PRESENT|PF_WRITE;
 		}
 		else
 		{
 			//Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
 			paddr = MM_DuplicatePage( Addr );
-			MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
-			gaPageTable[Addr>>12] &= PF_USER;
-			gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
+			MM_DerefPhys( *pte & ~0xFFF );
+			*pte &= PF_USER;
+			*pte |= paddr|PF_PRESENT|PF_WRITE;
 		}
 		
//		Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
@@ -192,6 +213,38 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 		return;
 	}
 
+	// --- Check for write to controlled area ---
+	// TODO: Catch user access
+	if( (*pde & PF_PRESENT) && (*pte & PF_PRESENT) && !(*pte & PF_WRITE) && (*pte & PF_WATCHED) )
+	{
+		Uint	page = Addr >> 12;
+		Uint	ofs = Addr & 0xFFF;
+		// Watchpoints are active for this page.
+		// > Locate watchpoint bitmap for page (dword granularity)
+		tWatchpoint	*wp = ( Addr >= KERNEL_BASE ? gapKernelWatchpoints[page%KWATCH_BUCKETS] : NULL);
+		while( wp && wp->PageNum != page )
+			wp = wp->Next;
+		if( !wp )
+		{
+			Log_Warning("MMVirt", "PF_WATCHED set on %p but no watchpoint info available", Addr);
+		}
+		else
+		{
+			// > If bit set, log/raise
+			if( wp->Bitmap[ (ofs/4)/8 ] & (1 << (ofs/4)%8) )
+			{
+				Log_Error("DEBUG", "Watchpoint %p written by %x:%p",
+					Addr, Regs->cs, Regs->eip);
+			}
+			Regs->eflags |= 1<<8;	// EFLAGS.TF - single-step the faulting write
+			//Proc_GetCurThread()->Proc.WPPage = Addr;
+		}
+		// > Clear write protection, set tracing
+		*pte |= PF_WRITE;
+		INVLPG( Addr & ~0xFFF );
+		return ;
+	}
+
 	// Disable instruction tracing
 	__ASM__("pushf; andw $0xFEFF, 0(%esp); popf");
 	Proc_GetCurThread()->bInstrTrace = 0;
@@ -254,6 +307,37 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 	Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
 }
 
+void MM_DumpTables_Print(tVAddr Start, Uint32 Orig, size_t Size, void *Node)
+{
+	if( (Orig & ~(PAGE_SIZE-1)) == giMM_ZeroPage )
+	{
+		Log(" 0x%08x => ZERO + 0x%08x (%s%s%s%s%s) %p",
+			Start,
+			Size,
+			(Orig & PF_NOPAGE ? "P" : "-"),
+			(Orig & PF_COW ? "C" : "-"),
+			(Orig & PF_GLOBAL ? "G" : "-"),
+			(Orig & PF_USER ? "U" : "-"),
+			(Orig & PF_WRITE ? "W" : "-"),
+			Node
+			);
+	}
+	else
+	{
+		Log(" 0x%08x => 0x%08x + 0x%08x (%s%s%s%s%s) %p",
+			Start,
+			Orig & ~0xFFF,
+			Size,
+			(Orig & PF_NOPAGE ? "P" : "-"),
+			(Orig & PF_COW ? "C" : "-"),
+			(Orig & PF_GLOBAL ? "G" : "-"),
+			(Orig & PF_USER ? "U" : "-"),
+			(Orig & PF_WRITE ? "W" : "-"),
+			Node
+			);
+	}
+}
+
 /**
  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
  * \brief Dumps the layout of the page tables
@@ -297,17 +381,7 @@ void MM_DumpTables(tVAddr Start, tVAddr End)
 	{
 		if(expected) {
 			tPAddr orig = gaPageTable[rangeStart>>12];
-			Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
-				rangeStart,
-				orig & ~0xFFF,
-				curPos - rangeStart,
-				(orig & PF_NOPAGE ? "P" : "-"),
-				(orig & PF_COW ? "C" : "-"),
-				(orig & PF_GLOBAL ? "G" : "-"),
-				(orig & PF_USER ? "U" : "-"),
-				(orig & PF_WRITE ? 
"W" : "-"), - expected_node - ); + MM_DumpTables_Print(rangeStart, orig, curPos - rangeStart, expected_node); expected = 0; } if( !(gaPageDir[curPos>>22] & PF_PRESENT) ) continue; @@ -317,22 +391,13 @@ void MM_DumpTables(tVAddr Start, tVAddr End) MM_GetPageNode(expected, &expected_node); rangeStart = curPos; } - if(expected) expected += 0x1000; + if(expected && (expected & ~(PAGE_SIZE-1)) != giMM_ZeroPage) + expected += 0x1000; } if(expected) { tPAddr orig = gaPageTable[rangeStart>>12]; - Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p", - rangeStart, - orig & ~0xFFF, - curPos - rangeStart, - (orig & PF_NOPAGE ? "p" : "-"), - (orig & PF_COW ? "C" : "-"), - (orig & PF_GLOBAL ? "G" : "-"), - (orig & PF_USER ? "U" : "-"), - (orig & PF_WRITE ? "W" : "-"), - expected_node - ); + MM_DumpTables_Print(rangeStart, orig, curPos - rangeStart, expected_node); expected = 0; } } @@ -340,172 +405,182 @@ void MM_DumpTables(tVAddr Start, tVAddr End) /** * \fn tPAddr MM_Allocate(tVAddr VAddr) */ -tPAddr MM_Allocate(tVAddr VAddr) +tPAddr MM_Allocate(volatile void * VAddr) { - tPAddr paddr; - //ENTER("xVAddr", VAddr); - //__ASM__("xchg %bx,%bx"); - // Check if the directory is mapped - if( gaPageDir[ VAddr >> 22 ] == 0 ) + tPAddr paddr = MM_AllocPhys(); + if( MM_Map(VAddr, paddr) ) { - // Allocate directory - paddr = MM_AllocPhys(); - if( paddr == 0 ) { - Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0)); - //LEAVE('i',0); - return 0; - } - // Map and mark as user (if needed) - gaPageDir[ VAddr >> 22 ] = paddr | 3; - if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER; - - INVLPG( &gaPageDir[ VAddr >> 22 ] ); - memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 ); + return paddr; } - // Check if the page is already allocated - else if( gaPageTable[ VAddr >> 12 ] != 0 ) { + + // Error of some form, either an overwrite or OOM + MM_DerefPhys(paddr); + + // Check for overwrite + paddr = MM_GetPhysAddr(VAddr); + if( paddr != 0 ) { Warning("MM_Allocate - Allocating to used address (%p)", VAddr); - //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF); - return gaPageTable[ VAddr >> 12 ] & ~0xFFF; + return paddr; } - // Allocate - paddr = MM_AllocPhys(); - //LOG("paddr = 0x%llx", paddr); - if( paddr == 0 ) { - Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)", - VAddr, __builtin_return_address(0)); - //LEAVE('i',0); - return 0; - } - // Map - gaPageTable[ VAddr >> 12 ] = paddr | 3; - // Mark as user - if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER; - // Invalidate Cache for address - INVLPG( VAddr & ~0xFFF ); - - //LEAVE('X', paddr); - return paddr; + // OOM + Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0)); + return 0; } -/** - * \fn void MM_Deallocate(tVAddr VAddr) - */ -void MM_Deallocate(tVAddr VAddr) +void MM_AllocateZero(volatile void *VAddr) { - if( gaPageDir[ VAddr >> 22 ] == 0 ) { - Warning("MM_Deallocate - Directory not mapped"); - return; + if( MM_GetPhysAddr(VAddr) ) { + Warning("MM_AllocateZero - Attempted overwrite at %p", VAddr); + return ; } - - if(gaPageTable[ VAddr >> 12 ] == 0) { - Warning("MM_Deallocate - Page is not allocated"); - return; + if( !giMM_ZeroPage ) + { + SHORTLOCK(&glMM_ZeroPage); + // Check again within the lock (just in case we lost the race) + if( giMM_ZeroPage == 0 ) + { + giMM_ZeroPage = MM_Allocate(VAddr); + // - Reference a second time to prevent it from being freed + MM_RefPhys(giMM_ZeroPage); + memset((void*)VAddr, 0, PAGE_SIZE); + } + 
SHORTREL(&glMM_ZeroPage); } - - // Dereference page - MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF ); - // Clear page - gaPageTable[ VAddr >> 12 ] = 0; -} - -/** - * \fn tPAddr MM_GetPhysAddr(tVAddr Addr) - * \brief Checks if the passed address is accesable - */ -tPAddr MM_GetPhysAddr(tVAddr Addr) -{ - if( !(gaPageDir[Addr >> 22] & 1) ) - return 0; - if( !(gaPageTable[Addr >> 12] & 1) ) - return 0; - return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF); -} - -/** - * \fn void MM_SetCR3(Uint CR3) - * \brief Sets the current process space - */ -void MM_SetCR3(Uint CR3) -{ - __ASM__("mov %0, %%cr3"::"r"(CR3)); + else + { + MM_Map(VAddr, giMM_ZeroPage); + MM_RefPhys(giMM_ZeroPage); + } + MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW); } /** * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr) * \brief Map a physical page to a virtual one */ -int MM_Map(tVAddr VAddr, tPAddr PAddr) +int MM_Map(volatile void *VAddr, tPAddr PAddr) { - //ENTER("xVAddr xPAddr", VAddr, PAddr); + Uint pagenum = (tVAddr)VAddr >> 12; + + #if TRACE_MAPS + Debug("MM_Map(%p, %P)", VAddr, PAddr); + #endif + // Sanity check - if( PAddr & 0xFFF || VAddr & 0xFFF ) { - Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)", - PAddr, VAddr); + if( (PAddr & 0xFFF) || ((tVAddr)VAddr & 0xFFF) ) { + Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (%P and %p) - %p", + PAddr, VAddr, __builtin_return_address(0)); //LEAVE('i', 0); return 0; } - // Align addresses - PAddr &= ~0xFFF; VAddr &= ~0xFFF; - + bool is_user = ((tVAddr)VAddr < MM_USER_MAX); + // Check if the directory is mapped - if( gaPageDir[ VAddr >> 22 ] == 0 ) + if( gaPageDir[ pagenum >> 10 ] == 0 ) { tPAddr tmp = MM_AllocPhys(); if( tmp == 0 ) return 0; - gaPageDir[ VAddr >> 22 ] = tmp | 3; + gaPageDir[ pagenum >> 10 ] = tmp | 3 | (is_user ? PF_USER : 0); - // Mark as user - if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER; - - INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] ); - memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 ); + INVLPG( &gaPageTable[ pagenum & ~0x3FF ] ); + memsetd( &gaPageTable[ pagenum & ~0x3FF ], 0, 1024 ); } // Check if the page is already allocated - else if( gaPageTable[ VAddr >> 12 ] != 0 ) { + else if( gaPageTable[ pagenum ] != 0 ) { Warning("MM_Map - Allocating to used address"); //LEAVE('i', 0); return 0; } // Map - gaPageTable[ VAddr >> 12 ] = PAddr | 3; - // Mark as user - if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER; - - //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x", - // VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]); - - // Reference - MM_RefPhys( PAddr ); + gaPageTable[ pagenum ] = PAddr | 3 | (is_user ? 
PF_USER : 0); - //LOG("INVLPG( 0x%x )", VAddr); INVLPG( VAddr ); - //LEAVE('i', 1); return 1; } +/* + * A.k.a MM_Unmap + */ +void MM_Deallocate(volatile void *VAddr) +{ + Uint pagenum = (tVAddr)VAddr >> 12; + if( gaPageDir[pagenum>>10] == 0 ) { + Warning("MM_Deallocate - Directory not mapped"); + return; + } + + if(gaPageTable[pagenum] == 0) { + Warning("MM_Deallocate - Page is not allocated"); + return; + } + + // Dereference and clear page + tPAddr paddr = gaPageTable[pagenum] & ~0xFFF; + gaPageTable[pagenum] = 0; + MM_DerefPhys( paddr ); +} + +/** + * \fn tPAddr MM_GetPhysAddr(tVAddr Addr) + * \brief Checks if the passed address is accesable + */ +tPAddr MM_GetPhysAddr(volatile const void *Addr) +{ + tVAddr addr = (tVAddr)Addr; + if( !(gaPageDir[addr >> 22] & 1) ) + return 0; + if( !(gaPageTable[addr >> 12] & 1) ) + return 0; + return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF); +} + +/** + * \brief Get the address of a page from another addres space + * \return Refenced physical address (or 0 on error) + */ +tPAddr MM_GetPageFromAS(tProcess *Process, volatile const void *Addr) +{ + tPAddr ret = 0; + GET_TEMP_MAPPING(Process->MemState.CR3); + tVAddr addr = (tVAddr)Addr; + if( (gaTmpDir[addr >> 22] & 1) && (gaTmpTable[addr >> 12] & 1) ) { + ret = (gaTmpTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF); + MM_RefPhys( ret ); + } + REL_TEMP_MAPPING(); + return ret; +} + +/** + * \fn void MM_SetCR3(Uint CR3) + * \brief Sets the current process space + */ +void MM_SetCR3(Uint CR3) +{ + __ASM__("mov %0, %%cr3"::"r"(CR3)); +} + /** * \brief Clear user's address space */ void MM_ClearUser(void) { - Uint i, j; - - for( i = 0; i < (MM_USER_MAX>>22); i ++ ) + ASSERTC(MM_PPD_MIN, ==, MM_USER_MAX); + for( unsigned int i = 0; i < (MM_USER_MAX>>22); i ++ ) { // Check if directory is not allocated if( !(gaPageDir[i] & PF_PRESENT) ) { gaPageDir[i] = 0; continue; } - + // Deallocate tables - for( j = 0; j < 1024; j ++ ) + for( unsigned int j = 0; j < 1024; j ++ ) { if( gaPageTable[i*1024+j] & 1 ) MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF ); @@ -525,8 +600,6 @@ void MM_ClearUser(void) */ void MM_ClearSpace(Uint32 CR3) { - int i, j; - if(CR3 == (*gpPageCR3 & ~0xFFF)) { Log_Error("MMVirt", "Can't clear current address space"); return ; @@ -543,7 +616,7 @@ void MM_ClearSpace(Uint32 CR3) GET_TEMP_MAPPING(CR3); INVLPG( gaTmpDir ); - for( i = 0; i < 1024; i ++ ) + for( int i = 0; i < 1024; i ++ ) { Uint32 *table = &gaTmpTable[i*1024]; if( !(gaTmpDir[i] & PF_PRESENT) ) @@ -553,7 +626,7 @@ void MM_ClearSpace(Uint32 CR3) if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) ) { - for( j = 0; j < 1024; j ++ ) + for( int j = 0; j < 1024; j ++ ) { if( !(table[j] & 1) ) continue; @@ -583,7 +656,6 @@ tPAddr MM_Clone(int bNoUserCopy) tPAddr ret; Uint page = 0; tVAddr kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE; - void *tmp; // Create Directory Table ret = MM_AllocPhys(); @@ -692,10 +764,8 @@ tPAddr MM_Clone(int bNoUserCopy) // Allocate page gaTmpTable[i*1024+j] = MM_AllocPhys() | 3; - MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF ); - - tmp = MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF ); - memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 ); + void *tmp = MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF ); + memcpy( tmp, (void *)( (i*1024+j)*PAGE_SIZE ), PAGE_SIZE ); MM_FreeTemp( tmp ); } } @@ -712,17 +782,16 @@ tPAddr MM_Clone(int bNoUserCopy) */ tVAddr MM_NewKStack(void) { - tVAddr base; - Uint i; - for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += 
MM_KERNEL_STACK_SIZE) + for(tVAddr base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE) { + tPage *pageptr = (void*)base; // Check if space is free - if(MM_GetPhysAddr(base) != 0) continue; + if(MM_GetPhysAddr(pageptr) != 0) + continue; // Allocate - //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; ) - for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 ) + for(Uint i = 0; i < MM_KERNEL_STACK_SIZE/PAGE_SIZE; i ++ ) { - if( MM_Allocate(base+i) == 0 ) + if( MM_Allocate(pageptr + i) == 0 ) { // On error, print a warning and return error Warning("MM_NewKStack - Out of memory"); @@ -747,10 +816,10 @@ tVAddr MM_NewKStack(void) */ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) { - Uint base, addr; - tVAddr tmpPage; + Uint base; tPAddr page; + LOG("(StackContents=%p,ContentsSize=%i)", StackContents, ContentsSize); // TODO: Thread safety // Find a free worker stack address for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++) @@ -768,9 +837,10 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) break; } if(base >= NUM_WORKER_STACKS) { - Warning("Uh-oh! Out of worker stacks"); + Log_Error("MMVirt", "Uh-oh! Out of worker stacks"); return 0; } + LOG("base=0x%x", base); // It's ours now! gWorkerStacks[base/32] |= (1 << base); @@ -779,6 +849,7 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) // We have one base = WORKER_STACKS + base * WORKER_STACK_SIZE; //Log(" MM_NewWorkerStack: base = 0x%x", base); + LOG("base=%p (top)", base); // Set the temp fractals to TID0's address space GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) ); @@ -786,29 +857,33 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) // Check if the directory is mapped (we are assuming that the stacks // will fit neatly in a directory) - //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]); + LOG("gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]); if(gaTmpDir[ base >> 22 ] == 0) { gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3; INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] ); } // Mapping Time! 
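+	// The loop below allocates the stack one page at a time and installs the
+	// PTEs directly into thread zero's tables through the temporary fractal
+	// mapping acquired above; worker stacks only exist in that address space.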
- for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 ) + for( Uint addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 ) { page = MM_AllocPhys(); gaTmpTable[ (base + addr) >> 12 ] = page | 3; } + LOG("mapped"); // Release temporary fractal REL_TEMP_MAPPING(); // NOTE: Max of 1 page // `page` is the last allocated page from the previious for loop - tmpPage = (tVAddr)MM_MapTemp( page ); - memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize); - MM_FreeTemp( (void*)tmpPage ); + LOG("Mapping first page"); + char *tmpPage = MM_MapTemp( page ); + LOG("tmpPage=%p", tmpPage); + memcpy( tmpPage + (0x1000 - ContentsSize), StackContents, ContentsSize); + MM_FreeTemp( tmpPage ); //Log("MM_NewWorkerStack: RETURN 0x%x", base); + LOG("return %p", base+WORKER_STACK_SIZE); return base + WORKER_STACK_SIZE; } @@ -816,13 +891,13 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) * \brief Sets the flags on a page */ -void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) +void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask) { - tTabEnt *ent; - if( !(gaPageDir[VAddr >> 22] & 1) ) return ; - if( !(gaPageTable[VAddr >> 12] & 1) ) return ; + Uint pagenum = (tVAddr)VAddr >> 12; + if( !(gaPageDir[pagenum >> 10] & 1) ) return ; + if( !(gaPageTable[pagenum] & 1) ) return ; - ent = &gaPageTable[VAddr >> 12]; + tTabEnt *ent = &gaPageTable[pagenum]; // Read-Only if( Mask & MM_PFLAG_RO ) @@ -831,7 +906,7 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) *ent &= ~PF_WRITE; } else { - gaPageDir[VAddr >> 22] |= PF_WRITE; + gaPageDir[pagenum >> 10] |= PF_WRITE; *ent |= PF_WRITE; } } @@ -843,7 +918,7 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) *ent &= ~PF_USER; } else { - gaPageDir[VAddr >> 22] |= PF_USER; + gaPageDir[pagenum >> 10] |= PF_USER; *ent |= PF_USER; } } @@ -868,17 +943,17 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) /** * \brief Get the flags on a page */ -Uint MM_GetFlags(tVAddr VAddr) +Uint MM_GetFlags(volatile const void *VAddr) { - tTabEnt *ent; - Uint ret = 0; + Uint pagenum = (tVAddr)VAddr >> 12; // Validity Check - if( !(gaPageDir[VAddr >> 22] & 1) ) return 0; - if( !(gaPageTable[VAddr >> 12] & 1) ) return 0; + if( !(gaPageDir[pagenum >> 10] & 1) ) return 0; + if( !(gaPageTable[pagenum] & 1) ) return 0; - ent = &gaPageTable[VAddr >> 12]; + tTabEnt *ent = &gaPageTable[pagenum]; + Uint ret = 0; // Read-Only if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO; // Kernel @@ -976,34 +1051,43 @@ tPAddr MM_DuplicatePage(tVAddr VAddr) * \brief Create a temporary memory mapping * \todo Show Luigi Barone (C Lecturer) and see what he thinks */ -void * MM_MapTemp(tPAddr PAddr) +void *MM_MapTemp(tPAddr PAddr) { - int i; - - //ENTER("XPAddr", PAddr); + ENTER("PPAddr", PAddr); PAddr &= ~0xFFF; - //LOG("glTempMappings = %i", glTempMappings); - - for(;;) + if( Semaphore_Wait(&gTempMappingsSem, 1) != 1 ) + return NULL; + LOG("Semaphore good"); + Mutex_Acquire( &glTempMappings ); + for( int i = 0; i < NUM_TEMP_PAGES; i ++ ) { - Mutex_Acquire( &glTempMappings ); + Uint32 *pte = &gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ]; + LOG("%i: %x", i, *pte); + // Check if page used + if(*pte & 1) continue; + MM_RefPhys( PAddr ); - for( i = 0; i < NUM_TEMP_PAGES; i ++ ) - { - // Check if page used - if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1) continue; - // Mark as used - gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3; - INVLPG( TEMP_MAP_ADDR + (i << 12) ); - //LEAVE('p', TEMP_MAP_ADDR + 
(i << 12)); - Mutex_Release( &glTempMappings ); - return (void*)( TEMP_MAP_ADDR + (i << 12) ); - } + // Mark as used + *pte = PAddr | 3; + INVLPG( TEMP_MAP_ADDR + (i << 12) ); + LEAVE('p', TEMP_MAP_ADDR + (i << 12)); Mutex_Release( &glTempMappings ); - Threads_Yield(); // TODO: Use a sleep queue here instead + return (void*)( TEMP_MAP_ADDR + (i << 12) ); } + Mutex_Release( &glTempMappings ); + Log_KernelPanic("MMVirt", "Semaphore suplied a mapping, but none are avaliable"); + return NULL; +} + +void *MM_MapTempFromProc(tProcess *Process, const void *VAddr) +{ + // Get paddr + tPAddr paddr = MM_GetPageFromAS(Process, VAddr); + if( paddr == 0 ) + return NULL; + return MM_MapTemp(paddr); } /** @@ -1016,7 +1100,11 @@ void MM_FreeTemp(void *VAddr) //ENTER("xVAddr", VAddr); if(i >= (TEMP_MAP_ADDR >> 12)) + { + MM_DerefPhys( gaPageTable[i] & ~0xFFF ); gaPageTable[ i ] = 0; + Semaphore_Signal(&gTempMappingsSem, 1); + } //LEAVE('-'); } @@ -1025,14 +1113,19 @@ void MM_FreeTemp(void *VAddr) * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number) * \brief Allocates a contigous number of pages */ -tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number) +void *MM_MapHWPages(tPAddr PAddr, Uint Number) { - int i, j; + int j; PAddr &= ~0xFFF; - + + if( PAddr < 1024*1024 && (1024*1024-PAddr) >= Number * PAGE_SIZE ) + { + return (void*)(KERNEL_BASE + PAddr); + } + // Scan List - for( i = 0; i < NUM_HW_PAGES; i ++ ) + for( int i = 0; i < NUM_HW_PAGES; i ++ ) { // Check if addr used if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 ) @@ -1052,7 +1145,7 @@ tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number) MM_RefPhys( PAddr + (j<<12) ); gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3; } - return HW_MAP_ADDR + (i<<12); + return (void*)(HW_MAP_ADDR + (i<<12)); } } // If we don't find any, return NULL @@ -1067,11 +1160,10 @@ tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number) * \param PhysAddr Pointer to the location to place the physical address allocated * \return Virtual address allocate */ -tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) +void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) { - tPAddr maxCheck = (1 << MaxBits); tPAddr phys; - tVAddr ret; + void *ret; ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr); @@ -1079,23 +1171,20 @@ tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) MaxBits = PHYS_BITS; // Sanity Check - if(MaxBits < 12 || !PhysAddr) { + if(MaxBits < 12) { LEAVE('i', 0); return 0; } - // Bound - if(MaxBits >= PHYS_BITS) maxCheck = -1; - // Fast Allocate if(Pages == 1 && MaxBits >= PHYS_BITS) { phys = MM_AllocPhys(); + if( PhysAddr ) + *PhysAddr = phys; if( !phys ) { - *PhysAddr = 0; LEAVE_RET('i', 0); } - *PhysAddr = phys; ret = MM_MapHWPages(phys, 1); if(ret == 0) { MM_DerefPhys(phys); @@ -1103,7 +1192,7 @@ tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) return 0; } LEAVE('x', ret); - return ret; + return (void*)ret; } // Slow Allocate @@ -1116,40 +1205,46 @@ tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) // Allocated successfully, now map ret = MM_MapHWPages(phys, Pages); + // - MapHWPages references the memory, so release references + for( int i = 0; i < Pages; i ++ ) + MM_DerefPhys(phys + i*PAGE_SIZE); if( ret == 0 ) { - // If it didn't map, free then return 0 - for(;Pages--;phys+=0x1000) - MM_DerefPhys(phys); LEAVE('i', 0); return 0; } - *PhysAddr = phys; + if( PhysAddr ) + *PhysAddr = phys; LEAVE('x', ret); - return ret; + return (void*)ret; } /** * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number) * \brief 
Unmap a hardware page */ -void MM_UnmapHWPages(tVAddr VAddr, Uint Number) +void MM_UnmapHWPages(volatile void *Base, Uint Number) { - int i, j; - + tVAddr VAddr = (tVAddr)Base; //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number); + + // + if( KERNEL_BASE <= VAddr && VAddr < KERNEL_BASE + 1024*1024 ) + return ; + Uint pagenum = VAddr >> 12; + // Sanity Check if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX) return; - i = VAddr >> 12; Mutex_Acquire( &glTempMappings ); // Temp and HW share a directory, so they share a lock - for( j = 0; j < Number; j++ ) + for( Uint i = 0; i < Number; i ++ ) { - MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF ); - gaPageTable[ i + j ] = 0; + MM_DerefPhys( gaPageTable[ pagenum + i ] & ~0xFFF ); + gaPageTable[ pagenum + i ] = 0; + INVLPG( (tVAddr)(pagenum + i) << 12 ); } Mutex_Release( &glTempMappings );
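
With the hunks above, MM_MapHWPages(), MM_AllocDMA() and MM_UnmapHWPages() return and accept void * rather than tVAddr. The following is a minimal sketch of the calling convention only; the driver structure, function names and included header are assumptions for illustration, not part of this commit.

#include <acess.h>	// assumed umbrella header exposing MM_AllocDMA() and friends

// Hypothetical device context; only the fields used below are shown.
typedef struct
{
	void	*DMABuf;	// kernel virtual address of the shared buffer
	tPAddr	DMAPhys;	// physical address handed to the hardware
} tExampleDev;

int ExampleDev_Setup(tExampleDev *Dev)
{
	// One page, reachable with a 32-bit bus address
	Dev->DMABuf = MM_AllocDMA(1, 32, &Dev->DMAPhys);
	if( !Dev->DMABuf )
		return -1;	// out of memory or no suitable physical range
	memset(Dev->DMABuf, 0, PAGE_SIZE);
	// ... program Dev->DMAPhys into the device ...
	return 0;
}

void ExampleDev_Shutdown(tExampleDev *Dev)
{
	// Release the virtual mapping created by MM_AllocDMA()
	MM_UnmapHWPages(Dev->DMABuf, 1);
	Dev->DMABuf = NULL;
}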