From e2744a459d1c63435d7348d0bfd0e4b92b0ec9f9 Mon Sep 17 00:00:00 2001 From: John Hodge Date: Sat, 15 Feb 2014 19:59:31 +0800 Subject: [PATCH] Kernel - Change virtual memory API to use void* for virtual addresses - General code cleanup while fixing compilation issues --- KernelLand/Kernel/Makefile | 1 + KernelLand/Kernel/arch/x86/acpica.c | 11 +- KernelLand/Kernel/arch/x86/include/arch_int.h | 5 + KernelLand/Kernel/arch/x86/kpanic.c | 28 +- KernelLand/Kernel/arch/x86/mm_phys.c | 39 ++- KernelLand/Kernel/arch/x86/mm_virt.c | 272 +++++++++--------- KernelLand/Kernel/arch/x86/proc.c | 25 +- KernelLand/Kernel/arch/x86/vm8086.c | 41 +-- KernelLand/Kernel/bin/elf.c | 4 +- KernelLand/Kernel/debug.c | 4 +- KernelLand/Kernel/drv/vterm_vt100.c | 2 +- KernelLand/Kernel/heap.c | 2 +- KernelLand/Kernel/include/acess.h | 21 +- KernelLand/Kernel/include/debug_hooks.h | 2 + KernelLand/Kernel/syscalls.c | 12 +- KernelLand/Kernel/vfs/handle.c | 24 +- KernelLand/Kernel/vfs/mmap.c | 126 +++++--- KernelLand/Modules/Display/BochsGA/bochsvbe.c | 2 +- KernelLand/Modules/Display/VESA/main.c | 10 +- .../Modules/Network/PCnetFAST3/pcnet-fast3.c | 5 +- KernelLand/Modules/USB/EHCI/ehci.c | 6 +- 21 files changed, 362 insertions(+), 280 deletions(-) diff --git a/KernelLand/Kernel/Makefile b/KernelLand/Kernel/Makefile index b05d01a2..fde1bdaf 100644 --- a/KernelLand/Kernel/Makefile +++ b/KernelLand/Kernel/Makefile @@ -28,6 +28,7 @@ CPPFLAGS += -D ARCH=$(ARCH) -D ARCHDIR=$(ARCHDIR) -D PLATFORM=\"$(PLATFORM)\" -D CPPFLAGS += -D KERNEL_VERSION=$(KERNEL_VERSION) -ffreestanding CFLAGS += -Wall -fno-stack-protector -Wstrict-prototypes -std=gnu99 -g -fno-omit-frame-pointer CFLAGS += -Wshadow -Wpointer-arith -Wcast-align -Wwrite-strings -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs -Winline -Wuninitialized +CFLAGS += -Werror CFLAGS += -O3 LDFLAGS += -T arch/$(ARCHDIR)/link.ld -g LIBGCC_PATH := $(shell $(CC) -print-libgcc-file-name) diff --git a/KernelLand/Kernel/arch/x86/acpica.c b/KernelLand/Kernel/arch/x86/acpica.c index 433feaac..5603add4 100644 --- a/KernelLand/Kernel/arch/x86/acpica.c +++ b/KernelLand/Kernel/arch/x86/acpica.c @@ -236,12 +236,14 @@ ACPI_STATUS AcpiOsReleaseObject(ACPI_CACHE_T *Cache, void *Object) void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length) { - if( PhysicalAddress < ONEMEG ) + if( PhysicalAddress < ONEMEG ) { + ASSERTCR(Length, <=, ONEMEG-PhysicalAddress, NULL); return (void*)(KERNEL_BASE | PhysicalAddress); + } Uint ofs = PhysicalAddress & (PAGE_SIZE-1); int npages = (ofs + Length + (PAGE_SIZE-1)) / PAGE_SIZE; - char *maploc = (void*)MM_MapHWPages(PhysicalAddress, npages); + char *maploc = MM_MapHWPages(PhysicalAddress, npages); if(!maploc) { LOG("Mapping %P+0x%x failed", PhysicalAddress, Length); return NULL; @@ -254,15 +256,16 @@ void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length) void AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Length) { - if( (tVAddr)LogicalAddress - KERNEL_BASE < ONEMEG ) + if( (tVAddr)LogicalAddress - KERNEL_BASE < ONEMEG ) { return ; + } LOG("%p", LogicalAddress); Uint ofs = (tVAddr)LogicalAddress & (PAGE_SIZE-1); int npages = (ofs + Length + (PAGE_SIZE-1)) / PAGE_SIZE; // TODO: Validate `Length` is the same as was passed to AcpiOsMapMemory - MM_UnmapHWPages( (tVAddr)LogicalAddress, npages); + MM_UnmapHWPages( LogicalAddress, npages); } ACPI_STATUS AcpiOsGetPhysicalAddress(void *LogicalAddress, ACPI_PHYSICAL_ADDRESS *PhysicalAddress) diff --git 
a/KernelLand/Kernel/arch/x86/include/arch_int.h b/KernelLand/Kernel/arch/x86/include/arch_int.h index 81ea2d77..f7613807 100644 --- a/KernelLand/Kernel/arch/x86/include/arch_int.h +++ b/KernelLand/Kernel/arch/x86/include/arch_int.h @@ -18,5 +18,10 @@ extern void __AtomicTestSetLoop(Uint *Ptr, Uint Value); */ extern void MM_ClearSpace(Uint32 CR3); +/** + * \brief Print a backtrace using the supplied IP/BP + */ +void Error_Backtrace(Uint EIP, Uint EBP); + #endif diff --git a/KernelLand/Kernel/arch/x86/kpanic.c b/KernelLand/Kernel/arch/x86/kpanic.c index 3adf8d56..ef2ad4cb 100644 --- a/KernelLand/Kernel/arch/x86/kpanic.c +++ b/KernelLand/Kernel/arch/x86/kpanic.c @@ -77,18 +77,27 @@ const struct { */ void KernelPanic_SetMode(void) { - int i; - __asm__ __volatile__ ("cli"); // Stop the processor! // This function is called by Panic(), but MM_PageFault and the // CPU exception handers also call it, so let's not clear the screen // twice if( giKP_Pos ) return ; + + #if USE_MP + // Send halt to all processors + for( int i = 0; i < giNumCPUs; i ++ ) + { + if(i == GetCPUNum()) continue ; + FB[i] = BGC|('A'+i); + MP_SendIPIVector(i, 0xED); + } + #endif + #if ENABLE_KPANIC_MODE // Restore VGA 0xB8000 text mode #if 0 - for( i = 0; i < NUM_REGVALUES; i++ ) + for( int i = 0; i < NUM_REGVALUES; i++ ) { // Reset Flip-Flop if( caRegValues[i].IdxPort == 0x3C0 ) inb(0x3DA); @@ -101,20 +110,9 @@ void KernelPanic_SetMode(void) inb(0x3DA); outb(0x3C0, 0x20); #endif - - #if USE_MP - // Send halt to all processors - for( i = 0; i < giNumCPUs; i ++ ) - { - if(i == GetCPUNum()) continue ; - FB[i] = BGC|('A'+i); - MP_SendIPIVector(i, 0xED); - } - #endif - #if ENABLE_KPANIC_MODE // Clear Screen - for( i = 0; i < 80*25; i++ ) + for( int i = 0; i < 80*25; i++ ) { FB[i] = BGC; } diff --git a/KernelLand/Kernel/arch/x86/mm_phys.c b/KernelLand/Kernel/arch/x86/mm_phys.c index eb422c96..cd5a33d0 100644 --- a/KernelLand/Kernel/arch/x86/mm_phys.c +++ b/KernelLand/Kernel/arch/x86/mm_phys.c @@ -391,20 +391,36 @@ void MM_RefPhys(tPAddr PAddr) { if( MM_GetPhysAddr( &gaPageReferences[PAddr] ) == 0 ) { - int i, base; - tVAddr addr = ((tVAddr)&gaPageReferences[PAddr]) & ~0xFFF; -// Log_Debug("PMem", "MM_RefPhys: Allocating info for %X", PAddr); + Uint base = PAddr & ~(1024-1); Mutex_Release( &glPhysAlloc ); - if( MM_Allocate( addr ) == 0 ) { + // No infinite recursion, AllocPhys doesn't need the reference array + // TODO: Race condition? (racy on populating) + if( MM_Allocate( &gaPageReferences[base] ) == 0 ) + { Log_KernelPanic("PMem", "MM_RefPhys: Out of physical memory allocating info for %X", PAddr*PAGE_SIZE ); + for(;;); } Mutex_Acquire( &glPhysAlloc ); + // TODO: Solve race condition. (see below) + // [1] See unallocated + // Release lock + // [2] Acquire lock + // See unallocated + // Release lock + // Allocate + // [1] Allocate + // Acquire lock + // Populate + // Release lock + // [2] Acquire lock + // Populate (clobbering) - base = PAddr & ~(1024-1); - for( i = 0; i < 1024; i ++ ) { + // Fill references from allocated bitmap + for( int i = 0; i < 1024; i ++ ) + { gaPageReferences[base + i] = (gaPageBitmap[(base+i)/32] & (1 << (base+i)%32)) ? 
1 : 0; } } @@ -494,22 +510,19 @@ int MM_GetRefCount(tPAddr PAddr) int MM_SetPageNode(tPAddr PAddr, void *Node) { - tVAddr block_addr; - if( MM_GetRefCount(PAddr) == 0 ) return 1; PAddr /= PAGE_SIZE; - block_addr = (tVAddr) &gaPageNodes[PAddr]; - block_addr &= ~(PAGE_SIZE-1); + void *page_ptr = (void*)( (tVAddr)&gaPageNodes[PAddr] & ~(PAGE_SIZE-1) ); - if( !MM_GetPhysAddr( (void*)block_addr ) ) + if( !MM_GetPhysAddr( page_ptr ) ) { - if( !MM_Allocate( block_addr ) ) { + if( !MM_Allocate( page_ptr ) ) { Log_Warning("PMem", "Unable to allocate Node page"); return -1; } - memset( (void*)block_addr, 0, PAGE_SIZE ); + memset( page_ptr, 0, PAGE_SIZE ); } gaPageNodes[PAddr] = Node; diff --git a/KernelLand/Kernel/arch/x86/mm_virt.c b/KernelLand/Kernel/arch/x86/mm_virt.c index a6239748..acfeaa7b 100644 --- a/KernelLand/Kernel/arch/x86/mm_virt.c +++ b/KernelLand/Kernel/arch/x86/mm_virt.c @@ -19,6 +19,8 @@ #include #include +#define TRACE_MAPS 0 + #define TAB 22 #define WORKER_STACKS 0x00100000 // Thread0 Only! @@ -68,11 +70,11 @@ typedef Uint32 tTabEnt; // === IMPORTS === -extern char _UsertextEnd[], _UsertextBase[]; +extern tPage _UsertextEnd; +extern tPage _UsertextBase; extern Uint32 gaInitPageDir[1024]; extern Uint32 gaInitPageTable[1024]; extern void Threads_SegFault(tVAddr Addr); -extern void Error_Backtrace(Uint eip, Uint ebp); // === PROTOTYPES === void MM_PreinitVirtual(void); @@ -109,6 +111,9 @@ struct sPageInfo { int Length; int Flags; } *gaMappedRegions; // sizeof = 24 bytes +// - Zero page +tShortSpinlock glMM_ZeroPage; +tPAddr giMM_ZeroPage; // === CODE === /** @@ -145,8 +150,11 @@ void MM_InstallVirtual(void) } // Unset kernel on the User Text pages - for( int i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) { - MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL ); + ASSERT( ((tVAddr)&_UsertextBase & (PAGE_SIZE-1)) == 0 ); + //ASSERT( ((tVAddr)&_UsertextEnd & (PAGE_SIZE-1)) == 0 ); + for( tPage *page = &_UsertextBase; page < &_UsertextEnd; page ++ ) + { + MM_SetFlags( page, 0, MM_PFLAG_KERNEL ); } *gpTmpCR3 = 0; @@ -342,156 +350,150 @@ void MM_DumpTables(tVAddr Start, tVAddr End) /** * \fn tPAddr MM_Allocate(tVAddr VAddr) */ -tPAddr MM_Allocate(tVAddr VAddr) +tPAddr MM_Allocate(volatile void * VAddr) { - tPAddr paddr; - //ENTER("xVAddr", VAddr); - //__ASM__("xchg %bx,%bx"); - // Check if the directory is mapped - if( gaPageDir[ VAddr >> 22 ] == 0 ) - { - // Allocate directory - paddr = MM_AllocPhys(); - if( paddr == 0 ) { - Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0)); - //LEAVE('i',0); - return 0; - } - // Map and mark as user (if needed) - gaPageDir[ VAddr >> 22 ] = paddr | 3; - if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER; - - INVLPG( &gaPageDir[ VAddr >> 22 ] ); - memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 ); + tPAddr paddr = MM_AllocPhys(); + if( MM_Map(VAddr, paddr) ) { + return paddr; } - // Check if the page is already allocated - else if( gaPageTable[ VAddr >> 12 ] != 0 ) { + + // Error of some form, either an overwrite or OOM + MM_DerefPhys(paddr); + + // Check for overwrite + paddr = MM_GetPhysAddr(VAddr); + if( paddr != 0 ) { Warning("MM_Allocate - Allocating to used address (%p)", VAddr); - //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF); - return gaPageTable[ VAddr >> 12 ] & ~0xFFF; + return paddr; } - // Allocate - paddr = MM_AllocPhys(); - //LOG("paddr = 0x%llx", paddr); - if( paddr == 0 ) { - Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)", 
- VAddr, __builtin_return_address(0)); - //LEAVE('i',0); - return 0; - } - // Map - gaPageTable[ VAddr >> 12 ] = paddr | 3; - // Mark as user - if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER; - // Invalidate Cache for address - INVLPG( VAddr & ~0xFFF ); - - //LEAVE('X', paddr); - return paddr; + // OOM + Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0)); + return 0; } -/** - * \fn void MM_Deallocate(tVAddr VAddr) - */ -void MM_Deallocate(tVAddr VAddr) +void MM_AllocateZero(volatile void *VAddr) { - if( gaPageDir[ VAddr >> 22 ] == 0 ) { - Warning("MM_Deallocate - Directory not mapped"); - return; + if( MM_GetPhysAddr(VAddr) ) { + Warning("MM_AllocateZero - Attempted overwrite at %p", VAddr); + return ; } - - if(gaPageTable[ VAddr >> 12 ] == 0) { - Warning("MM_Deallocate - Page is not allocated"); - return; + if( !giMM_ZeroPage ) + { + SHORTLOCK(&glMM_ZeroPage); + // Check again within the lock (just in case we lost the race) + if( giMM_ZeroPage == 0 ) + { + giMM_ZeroPage = MM_Allocate(VAddr); + // - Reference a second time to prevent it from being freed + MM_RefPhys(giMM_ZeroPage); + memset((void*)VAddr, 0, PAGE_SIZE); + } + SHORTREL(&glMM_ZeroPage); } - - // Dereference page - MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF ); - // Clear page - gaPageTable[ VAddr >> 12 ] = 0; -} - -/** - * \fn tPAddr MM_GetPhysAddr(tVAddr Addr) - * \brief Checks if the passed address is accesable - */ -tPAddr MM_GetPhysAddr(volatile const void *Addr) -{ - tVAddr addr = (tVAddr)Addr; - if( !(gaPageDir[addr >> 22] & 1) ) - return 0; - if( !(gaPageTable[addr >> 12] & 1) ) - return 0; - return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF); -} - -/** - * \fn void MM_SetCR3(Uint CR3) - * \brief Sets the current process space - */ -void MM_SetCR3(Uint CR3) -{ - __ASM__("mov %0, %%cr3"::"r"(CR3)); + else + { + MM_Map(VAddr, giMM_ZeroPage); + } + MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW); } /** * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr) * \brief Map a physical page to a virtual one */ -int MM_Map(tVAddr VAddr, tPAddr PAddr) +int MM_Map(volatile void *VAddr, tPAddr PAddr) { - //ENTER("xVAddr xPAddr", VAddr, PAddr); + Uint pagenum = (tVAddr)VAddr >> 12; + + #if TRACE_MAPS + Debug("MM_Map(%p, %P)", VAddr, PAddr); + #endif + // Sanity check - if( PAddr & 0xFFF || VAddr & 0xFFF ) { + if( PAddr & 0xFFF || (tVAddr)VAddr & 0xFFF ) { Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)", PAddr, VAddr); //LEAVE('i', 0); return 0; } - // Align addresses - PAddr &= ~0xFFF; VAddr &= ~0xFFF; - + bool is_user = ((tVAddr)VAddr < MM_USER_MAX); + // Check if the directory is mapped - if( gaPageDir[ VAddr >> 22 ] == 0 ) + if( gaPageDir[ pagenum >> 10 ] == 0 ) { tPAddr tmp = MM_AllocPhys(); if( tmp == 0 ) return 0; - gaPageDir[ VAddr >> 22 ] = tmp | 3; + gaPageDir[ pagenum >> 10 ] = tmp | 3 | (is_user ? 
PF_USER : 0); - // Mark as user - if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER; - - INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] ); - memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 ); + INVLPG( &gaPageTable[ pagenum & ~0x3FF ] ); + memsetd( &gaPageTable[ pagenum & ~0x3FF ], 0, 1024 ); } // Check if the page is already allocated - else if( gaPageTable[ VAddr >> 12 ] != 0 ) { + else if( gaPageTable[ pagenum ] != 0 ) { Warning("MM_Map - Allocating to used address"); //LEAVE('i', 0); return 0; } // Map - gaPageTable[ VAddr >> 12 ] = PAddr | 3; - // Mark as user - if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER; - - //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x", - // VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]); + gaPageTable[ pagenum ] = PAddr | 3 | (is_user ? PF_USER : 0); // Reference MM_RefPhys( PAddr ); - //LOG("INVLPG( 0x%x )", VAddr); INVLPG( VAddr ); - //LEAVE('i', 1); return 1; } +/* + * A.k.a MM_Unmap + */ +void MM_Deallocate(volatile void *VAddr) +{ + Uint pagenum = (tVAddr)VAddr >> 12; + if( gaPageDir[pagenum>>10] == 0 ) { + Warning("MM_Deallocate - Directory not mapped"); + return; + } + + if(gaPageTable[pagenum] == 0) { + Warning("MM_Deallocate - Page is not allocated"); + return; + } + + // Dereference and clear page + tPAddr paddr = gaPageTable[pagenum] & ~0xFFF; + gaPageTable[pagenum] = 0; + MM_DerefPhys( paddr ); +} + +/** + * \fn tPAddr MM_GetPhysAddr(tVAddr Addr) + * \brief Checks if the passed address is accesable + */ +tPAddr MM_GetPhysAddr(volatile const void *Addr) +{ + tVAddr addr = (tVAddr)Addr; + if( !(gaPageDir[addr >> 22] & 1) ) + return 0; + if( !(gaPageTable[addr >> 12] & 1) ) + return 0; + return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF); +} + +/** + * \fn void MM_SetCR3(Uint CR3) + * \brief Sets the current process space + */ +void MM_SetCR3(Uint CR3) +{ + __ASM__("mov %0, %%cr3"::"r"(CR3)); +} + /** * \brief Clear user's address space */ @@ -715,18 +717,16 @@ tPAddr MM_Clone(int bNoUserCopy) */ tVAddr MM_NewKStack(void) { - tVAddr base; - Uint i; - for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE) + for(tVAddr base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE) { + tPage *pageptr = (void*)base; // Check if space is free - if(MM_GetPhysAddr( (void*) base) != 0) + if(MM_GetPhysAddr(pageptr) != 0) continue; // Allocate - //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; ) - for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 ) + for(Uint i = 0; i < MM_KERNEL_STACK_SIZE/PAGE_SIZE; i ++ ) { - if( MM_Allocate(base+i) == 0 ) + if( MM_Allocate(pageptr + i) == 0 ) { // On error, print a warning and return error Warning("MM_NewKStack - Out of memory"); @@ -826,13 +826,13 @@ tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) * \brief Sets the flags on a page */ -void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) +void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask) { - tTabEnt *ent; - if( !(gaPageDir[VAddr >> 22] & 1) ) return ; - if( !(gaPageTable[VAddr >> 12] & 1) ) return ; + Uint pagenum = (tVAddr)VAddr >> 12; + if( !(gaPageDir[pagenum >> 10] & 1) ) return ; + if( !(gaPageTable[pagenum] & 1) ) return ; - ent = &gaPageTable[VAddr >> 12]; + tTabEnt *ent = &gaPageTable[pagenum]; // Read-Only if( Mask & MM_PFLAG_RO ) @@ -841,7 +841,7 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) *ent &= ~PF_WRITE; } else { - gaPageDir[VAddr >> 22] |= 
PF_WRITE; + gaPageDir[pagenum >> 10] |= PF_WRITE; *ent |= PF_WRITE; } } @@ -853,7 +853,7 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) *ent &= ~PF_USER; } else { - gaPageDir[VAddr >> 22] |= PF_USER; + gaPageDir[pagenum >> 10] |= PF_USER; *ent |= PF_USER; } } @@ -878,17 +878,17 @@ void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) /** * \brief Get the flags on a page */ -Uint MM_GetFlags(tVAddr VAddr) +Uint MM_GetFlags(volatile const void *VAddr) { - tTabEnt *ent; - Uint ret = 0; + Uint pagenum = (tVAddr)VAddr >> 12; // Validity Check - if( !(gaPageDir[VAddr >> 22] & 1) ) return 0; - if( !(gaPageTable[VAddr >> 12] & 1) ) return 0; + if( !(gaPageDir[pagenum >> 10] & 1) ) return 0; + if( !(gaPageTable[pagenum] & 1) ) return 0; - ent = &gaPageTable[VAddr >> 12]; + tTabEnt *ent = &gaPageTable[pagenum]; + Uint ret = 0; // Read-Only if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO; // Kernel @@ -1145,28 +1145,28 @@ void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr) * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number) * \brief Unmap a hardware page */ -void MM_UnmapHWPages(tVAddr VAddr, Uint Number) +void MM_UnmapHWPages(volatile void *Base, Uint Number) { - int i, j; - + tVAddr VAddr = (tVAddr)Base; //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number); // if( KERNEL_BASE <= VAddr && VAddr < KERNEL_BASE + 1024*1024 ) - return ; + return ; + + Uint pagenum = VAddr >> 12; // Sanity Check if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX) return; - i = VAddr >> 12; Mutex_Acquire( &glTempMappings ); // Temp and HW share a directory, so they share a lock - for( j = 0; j < Number; j++ ) + for( Uint i = 0; i < Number; i ++ ) { - MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF ); - gaPageTable[ i + j ] = 0; - INVLPG( (tVAddr)(i+j) << 12 ); + MM_DerefPhys( gaPageTable[ pagenum + i ] & ~0xFFF ); + gaPageTable[ pagenum + i ] = 0; + INVLPG( (tVAddr)(pagenum + i) << 12 ); } Mutex_Release( &glTempMappings ); diff --git a/KernelLand/Kernel/arch/x86/proc.c b/KernelLand/Kernel/arch/x86/proc.c index dac26c9b..7123eefb 100644 --- a/KernelLand/Kernel/arch/x86/proc.c +++ b/KernelLand/Kernel/arch/x86/proc.c @@ -213,7 +213,7 @@ void ArchThreads_Init(void) gProcessZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE; // Create Per-Process Data Block - if( !MM_Allocate(MM_PPD_CFG) ) + if( MM_Allocate( (void*)MM_PPD_CFG ) == 0 ) { Panic("OOM - No space for initial Per-Process Config"); } @@ -553,27 +553,28 @@ tThread *Proc_SpawnWorker(void (*Fcn)(void*), void *Data) */ Uint Proc_MakeUserStack(void) { - int i; - Uint base = USER_STACK_TOP - USER_STACK_SZ; + tPage *base = (void*)(USER_STACK_TOP - USER_STACK_SZ); // Check Prospective Space - for( i = USER_STACK_SZ >> 12; i--; ) - if( MM_GetPhysAddr( (void*)(base + (i<<12)) ) != 0 ) - break; - - if(i != -1) return 0; - + for( Uint i = USER_STACK_SZ/PAGE_SIZE; i--; ) + { + if( MM_GetPhysAddr( base + i ) != 0 ) + { + Warning("Proc_MakeUserStack: Address %p in use", base + i); + return 0; + } + } // Allocate Stack - Allocate incrementally to clean up MM_Dump output - for( i = 0; i < USER_STACK_SZ/0x1000; i++ ) + for( Uint i = 0; i < USER_STACK_SZ/PAGE_SIZE; i++ ) { - if( !MM_Allocate( base + (i<<12) ) ) + if( MM_Allocate( base + i ) == 0 ) { Warning("OOM: Proc_MakeUserStack"); return 0; } } - return base + USER_STACK_SZ; + return (tVAddr)( base + USER_STACK_SZ/PAGE_SIZE ); } void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, const char **ArgV, int DataSize) diff --git a/KernelLand/Kernel/arch/x86/vm8086.c 
b/KernelLand/Kernel/arch/x86/vm8086.c index 25638f6a..c1c48ce3 100644 --- a/KernelLand/Kernel/arch/x86/vm8086.c +++ b/KernelLand/Kernel/arch/x86/vm8086.c @@ -41,7 +41,7 @@ enum eVM8086_Opcodes struct sVM8086_InternalPages { Uint32 Bitmap; // 32 sections = 128 byte blocks - tVAddr VirtBase; + char *VirtBase; tPAddr PhysAddr; }; struct sVM8086_InternalData @@ -96,20 +96,20 @@ int VM8086_Install(char **Arguments) // Map ROM Area for(i=0xA0;i<0x100;i++) { - MM_Map( i * 0x1000, i * 0x1000 ); + MM_Map( (void*)(i * 0x1000), i * 0x1000 ); } - MM_Map( 0, 0 ); // IVT / BDA + MM_Map( (void*)0, 0 ); // IVT / BDA if( MM_GetRefCount(0x00000) > 2 ) { Log_Notice("VM8086", "Ok, who's touched the IVT? (%i)", MM_GetRefCount(0x00000)); } - MM_Map( 0x9F000, 0x9F000 ); // Stack / EBDA + MM_Map( (void*)0x9F000, 0x9F000 ); // Stack / EBDA if( MM_GetRefCount(0x9F000) > 2 ) { Log_Notice("VM8086", "And who's been playing with my EBDA? (%i)", MM_GetRefCount(0x9F000)); } // System Stack / Stub - if( MM_Allocate( 0x100000 ) == 0 ) { + if( MM_Allocate( (void*)0x100000 ) == 0 ) { Log_Error("VM8086", "Unable to allocate memory for stack/stub"); gVM8086_WorkerPID = 0; Threads_Exit(0, 1); @@ -184,7 +184,6 @@ void VM8086_GPF(tRegs *Regs) if(Regs->eip == VM8086_MAGIC_IP && Regs->cs == VM8086_MAGIC_CS && Threads_GetPID() == gVM8086_WorkerPID) { - int i; if( gpVM8086_State == (void*)-1 ) { Log_Log("VM8086", "Worker thread ready and waiting"); gpVM8086_State = NULL; @@ -192,7 +191,8 @@ void VM8086_GPF(tRegs *Regs) } // Log_Log("VM8086", "gpVM8086_State = %p, gVM8086_CallingThread = %i", // gpVM8086_State, gVM8086_CallingThread); - if( gpVM8086_State ) { + if( gpVM8086_State ) + { gpVM8086_State->AX = Regs->eax; gpVM8086_State->CX = Regs->ecx; gpVM8086_State->DX = Regs->edx; gpVM8086_State->BX = Regs->ebx; gpVM8086_State->BP = Regs->ebp; @@ -201,10 +201,11 @@ void VM8086_GPF(tRegs *Regs) LOG("gpVM8086_State = %p", gpVM8086_State); LOG("gpVM8086_State->Internal = %p", gpVM8086_State->Internal); - for( i = 0; i < VM8086_PAGES_PER_INST; i ++ ) { + for( Uint i = 0; i < VM8086_PAGES_PER_INST; i ++ ) + { if( !gpVM8086_State->Internal->AllocatedPages[i].VirtBase ) continue ; - MM_Deallocate( VM8086_USER_BASE + i*PAGE_SIZE ); + MM_Deallocate( (tPage*)VM8086_USER_BASE + i ); } gpVM8086_State = NULL; @@ -217,11 +218,11 @@ void VM8086_GPF(tRegs *Regs) __asm__ __volatile__ ("sti"); Semaphore_Wait(&gVM8086_TasksToDo, 1); - for( i = 0; i < VM8086_PAGES_PER_INST; i ++ ) + for( Uint i = 0; i < VM8086_PAGES_PER_INST; i ++ ) { if( !gpVM8086_State->Internal->AllocatedPages[i].VirtBase ) continue ; - MM_Map( VM8086_USER_BASE + i*PAGE_SIZE, gpVM8086_State->Internal->AllocatedPages[i].PhysAddr ); + MM_Map( (tPage*)VM8086_USER_BASE + i, gpVM8086_State->Internal->AllocatedPages[i].PhysAddr ); } @@ -416,9 +417,8 @@ tVM8086 *VM8086_Init(void) void VM8086_Free(tVM8086 *State) { - int i; // TODO: Make sure the state isn't in use currently - for( i = VM8086_PAGES_PER_INST; i --; ) + for( Uint i = VM8086_PAGES_PER_INST; i --; ) MM_UnmapHWPages( State->Internal->AllocatedPages[i].VirtBase, 1); free(State); } @@ -443,8 +443,11 @@ void *VM8086_Allocate(tVM8086 *State, int Size, Uint16 *Segment, Uint16 *Offset) rem = nBlocks; base = 0; // Scan the bitmap for a free block - for( j = 0; j < 32; j++ ) { - if( pages[i].Bitmap & (1 << j) ) { + // - 32 blocks per page == 128 bytes per block == 8 segments + for( j = 0; j < 32; j++ ) + { + if( pages[i].Bitmap & (1 << j) ) + { base = j+1; rem = nBlocks; } @@ -458,7 +461,7 @@ void *VM8086_Allocate(tVM8086 *State, int Size, 
Uint16 *Segment, Uint16 *Offset) *Offset = 0; LOG("Allocated at #%i,%04x", i, base*8*16); LOG(" - %x:%x", *Segment, *Offset); - return (void*)( pages[i].VirtBase + base * 8 * 16 ); + return pages[i].VirtBase + base * 8 * 16; } } } @@ -474,7 +477,7 @@ void *VM8086_Allocate(tVM8086 *State, int Size, Uint16 *Segment, Uint16 *Offset) return NULL; } - pages[i].VirtBase = (tVAddr)MM_AllocDMA(1, -1, &pages[i].PhysAddr); + pages[i].VirtBase = MM_AllocDMA(1, -1, &pages[i].PhysAddr); if( pages[i].VirtBase == 0 ) { Log_Warning("VM8086", "Unable to allocate data page"); return NULL; @@ -489,7 +492,7 @@ void *VM8086_Allocate(tVM8086 *State, int Size, Uint16 *Segment, Uint16 *Offset) *Segment = (VM8086_USER_BASE + i * 0x1000) / 16; *Offset = 0; LOG(" - %04x:%04x", *Segment, *Offset); - return (void*) pages[i].VirtBase; + return pages[i].VirtBase; } void *VM8086_GetPointer(tVM8086 *State, Uint16 Segment, Uint16 Offset) @@ -502,7 +505,7 @@ void *VM8086_GetPointer(tVM8086 *State, Uint16 Segment, Uint16 Offset) if( State->Internal->AllocatedPages[pg].VirtBase == 0) return NULL; else - return (Uint8*)State->Internal->AllocatedPages[pg].VirtBase + (addr & 0xFFF); + return State->Internal->AllocatedPages[pg].VirtBase + (addr & 0xFFF); } else { diff --git a/KernelLand/Kernel/bin/elf.c b/KernelLand/Kernel/bin/elf.c index d404d6db..7f65d89d 100644 --- a/KernelLand/Kernel/bin/elf.c +++ b/KernelLand/Kernel/bin/elf.c @@ -33,10 +33,10 @@ static int _SysSetMemFlags(tVAddr addr, int flag, int mask) { if( mask & 1 ) { if( flag ) { // Re-set RO, clear COW - MM_SetFlags(addr, MM_PFLAG_RO, MM_PFLAG_RO|MM_PFLAG_COW); + MM_SetFlags((void*)addr, MM_PFLAG_RO, MM_PFLAG_RO|MM_PFLAG_COW); } else { - MM_SetFlags(addr, MM_PFLAG_RO|MM_PFLAG_COW, MM_PFLAG_RO|MM_PFLAG_COW); + MM_SetFlags((void*)addr, MM_PFLAG_RO|MM_PFLAG_COW, MM_PFLAG_RO|MM_PFLAG_COW); } } return 0; diff --git a/KernelLand/Kernel/debug.c b/KernelLand/Kernel/debug.c index 5bcbcb8e..d0de25ab 100644 --- a/KernelLand/Kernel/debug.c +++ b/KernelLand/Kernel/debug.c @@ -4,17 +4,17 @@ */ #include #include +#include #define DEBUG_MAX_LINE_LEN 256 #define LOCK_DEBUG_OUTPUT 1 // Avoid interleaving of output lines? #define TRACE_TO_KTERM 0 // Send ENTER/DEBUG/LEAVE to debug? 
// === IMPORTS === -extern void Threads_Dump(void); -extern void Heap_Dump(void); extern void KernelPanic_SetMode(void); extern void KernelPanic_PutChar(char Ch); extern void IPStack_SendDebugText(const char *Text); +extern void VT_SetTerminal(int TerminalID); // === PROTOTYPES === static void Debug_Putchar(char ch); diff --git a/KernelLand/Kernel/drv/vterm_vt100.c b/KernelLand/Kernel/drv/vterm_vt100.c index b5d60a94..4428b64e 100644 --- a/KernelLand/Kernel/drv/vterm_vt100.c +++ b/KernelLand/Kernel/drv/vterm_vt100.c @@ -5,7 +5,7 @@ * drv/vterm_vt100.c * - Virtual Terminal - VT100 (Kinda) Emulation */ -#define DEBUG 1 +#define DEBUG 0 #include "vterm.h" #define sTerminal sVTerm diff --git a/KernelLand/Kernel/heap.c b/KernelLand/Kernel/heap.c index 6e0a4c10..d4304c27 100755 --- a/KernelLand/Kernel/heap.c +++ b/KernelLand/Kernel/heap.c @@ -97,7 +97,7 @@ void *Heap_Extend(size_t Bytes) // Heap expands in pages for( Uint i = 0; i < pages; i ++ ) { - if( !MM_Allocate( (tVAddr)gHeapEnd+(i<<12) ) ) + if( !MM_Allocate( (tPage*)gHeapEnd + i ) ) { Warning("OOM - Heap_Extend (%i bytes)"); Heap_Dump(); diff --git a/KernelLand/Kernel/include/acess.h b/KernelLand/Kernel/include/acess.h index a43e5c02..8a413a27 100644 --- a/KernelLand/Kernel/include/acess.h +++ b/KernelLand/Kernel/include/acess.h @@ -53,6 +53,7 @@ typedef Sint64 tTime; //!< Same again typedef struct sShortSpinlock tShortSpinlock; //!< Opaque (kinda) spinlock typedef int bool; //!< Boolean type typedef Uint64 off_t; //!< VFS Offset +typedef struct { char _[PAGE_SIZE];} tPage; // Representation of a page for pointer arithmatic // --- Helper Macros --- /** @@ -188,19 +189,25 @@ extern Uint64 inq(Uint16 Port); * \param VAddr Virtual Address to allocate at * \return Physical address allocated */ -extern tPAddr MM_Allocate(tVAddr VAddr) __attribute__ ((warn_unused_result)); +extern tPAddr MM_Allocate(volatile void *VAddr) __attribute__ ((warn_unused_result)); +/** + * \breif Allocate a zeroed COW page to \a VAddr + * \param VAddr Virtual address to allocate at + * \return Physical address allocated (don't cache) + */ +extern void MM_AllocateZero(volatile void *VAddr); /** * \brief Deallocate a page * \param VAddr Virtual address to unmap */ -extern void MM_Deallocate(tVAddr VAddr); +extern void MM_Deallocate(volatile void *VAddr); /** * \brief Map a physical page at \a PAddr to \a VAddr * \param VAddr Target virtual address * \param PAddr Physical address to map * \return Boolean Success */ -extern int MM_Map(tVAddr VAddr, tPAddr PAddr); +extern int MM_Map(volatile void * VAddr, tPAddr PAddr); /** * \brief Get the physical address of \a Addr * \param Addr Address of the page to get the physical address of @@ -213,19 +220,19 @@ extern tPAddr MM_GetPhysAddr(volatile const void *Addr); * \param Flags New flags value * \param Mask Flags to set */ -extern void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask); +extern void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask); /** * \brief Get the flags on a flag * \param VAddr Virtual address of page * \return Flags value of the page */ -extern Uint MM_GetFlags(tVAddr VAddr); +extern Uint MM_GetFlags(volatile const void *VAddr); /** * \brief Checks is a memory range is user accessable * \param VAddr Base address to check * \return 1 if the memory is all user-accessable, 0 otherwise */ -#define MM_IsUser(VAddr) (!(MM_GetFlags((tVAddr)(VAddr))&MM_PFLAG_KERNEL)) +#define MM_IsUser(VAddr) (!(MM_GetFlags((const void*)(VAddr))&MM_PFLAG_KERNEL)) /** * \brief Temporarily map a page into the address space 
* \param PAddr Physical addres to map @@ -257,7 +264,7 @@ extern void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr); * \param VAddr Virtual address allocate by ::MM_MapHWPages or ::MM_AllocDMA * \param Number Number of pages to free */ -extern void MM_UnmapHWPages(tVAddr VAddr, Uint Number); +extern void MM_UnmapHWPages(volatile void *VAddr, Uint Number); /** * \brief Allocate a single physical page * \return Physical address allocated diff --git a/KernelLand/Kernel/include/debug_hooks.h b/KernelLand/Kernel/include/debug_hooks.h index 8ea42115..4c267571 100644 --- a/KernelLand/Kernel/include/debug_hooks.h +++ b/KernelLand/Kernel/include/debug_hooks.h @@ -13,4 +13,6 @@ extern void Threads_Dump(void); extern void Threads_ToggleTrace(int TID); extern void Heap_Stats(void); +extern void Proc_PrintBacktrace(void); + #endif diff --git a/KernelLand/Kernel/syscalls.c b/KernelLand/Kernel/syscalls.c index 96f1f854..d2f91858 100644 --- a/KernelLand/Kernel/syscalls.c +++ b/KernelLand/Kernel/syscalls.c @@ -44,7 +44,7 @@ extern Uint Binary_Load(const char *file, Uint *entryPoint); void SyscallHandler(tSyscallRegs *Regs); int Syscall_ValidString(const char *Addr); int Syscall_Valid(int Size, const void *Addr); - int Syscall_MM_SetFlags(const void *Addr, Uint Flags, Uint Mask); + int Syscall_MM_SetFlags(void *Addr, Uint Flags, Uint Mask); // === CODE === // TODO: Do sanity checking on arguments, ATM the user can really fuck with the kernel @@ -117,13 +117,13 @@ void SyscallHandler(tSyscallRegs *Regs) break; // -- Map an address - case SYS_MAP: MM_Map(Regs->Arg1, Regs->Arg2); break; + case SYS_MAP: MM_Map((void*)Regs->Arg1, Regs->Arg2); break; // -- Allocate an address - case SYS_ALLOCATE: ret = MM_Allocate(Regs->Arg1); break; + case SYS_ALLOCATE: ret = MM_Allocate((void*)Regs->Arg1); break; // -- Unmap an address - case SYS_UNMAP: MM_Deallocate(Regs->Arg1); break; + case SYS_UNMAP: MM_Deallocate((void*)Regs->Arg1); break; // -- Change the protection on an address case SYS_SETFLAGS: @@ -454,7 +454,7 @@ int Syscall_Valid(int Size, const void *Addr) return CheckMem( Addr, Size ); } -int Syscall_MM_SetFlags(const void *Addr, Uint Flags, Uint Mask) +int Syscall_MM_SetFlags(void *Addr, Uint Flags, Uint Mask) { tPAddr paddr = MM_GetPhysAddr(Addr); Flags &= MM_PFLAG_RO|MM_PFLAG_EXEC; @@ -471,6 +471,6 @@ int Syscall_MM_SetFlags(const void *Addr, Uint Flags, Uint Mask) Mask |= MM_PFLAG_COW; } } - MM_SetFlags((tVAddr)Addr, Flags, Mask); + MM_SetFlags(Addr, Flags, Mask); return 0; } diff --git a/KernelLand/Kernel/vfs/handle.c b/KernelLand/Kernel/vfs/handle.c index cd4997d8..661ced81 100644 --- a/KernelLand/Kernel/vfs/handle.c +++ b/KernelLand/Kernel/vfs/handle.c @@ -76,15 +76,16 @@ int VFS_AllocHandle(int bIsUser, tVFS_Node *Node, int Mode) // Allocate Buffer if( MM_GetPhysAddr( gaUserHandles ) == 0 ) { - Uint addr, size; - size = max_handles * sizeof(tVFS_Handle); - for(addr = 0; addr < size; addr += 0x1000) + tPage *pageptr = (void*)gaUserHandles; + size_t size = max_handles * sizeof(tVFS_Handle); + for( size_t ofs = 0; ofs < size; ofs ++) { - if( !MM_Allocate( (tVAddr)gaUserHandles + addr ) ) + if( !MM_Allocate( pageptr ) ) { Warning("OOM - VFS_AllocHandle"); Threads_Exit(0, 0xFF); // Terminate user } + pageptr ++; } memset( gaUserHandles, 0, size ); } @@ -103,15 +104,15 @@ int VFS_AllocHandle(int bIsUser, tVFS_Node *Node, int Mode) // Allocate space if not already if( MM_GetPhysAddr( gaKernelHandles ) == 0 ) { - Uint addr, size; - size = MAX_KERNEL_FILES * sizeof(tVFS_Handle); - for(addr = 0; addr < 
size; addr += 0x1000) + tPage *pageptr = (void*)gaKernelHandles; + size_t size = MAX_KERNEL_FILES * sizeof(tVFS_Handle); + for(size_t ofs = 0; ofs < size; ofs += size) { - if( !MM_Allocate( (tVAddr)gaKernelHandles + addr ) ) + if( !MM_Allocate( pageptr ) ) { Panic("OOM - VFS_AllocHandle"); - Threads_Exit(0, 0xFF); // Terminate application (get some space back) } + pageptr ++; } memset( gaKernelHandles, 0, size ); } @@ -151,14 +152,13 @@ void VFS_ReferenceUserHandles(void) void VFS_CloseAllUserHandles(void) { - int i; int max_handles = *Threads_GetMaxFD(); // Check if this process has any handles if( MM_GetPhysAddr( gaUserHandles ) == 0 ) return ; - for( i = 0; i < max_handles; i ++ ) + for( int i = 0; i < max_handles; i ++ ) { tVFS_Handle *h; h = &gaUserHandles[i]; @@ -249,7 +249,7 @@ void VFS_RestoreHandles(int NumFDs, void *Handles) if( !MM_GetPhysAddr(h) ) { void *pg = (void*)( (tVAddr)h & ~(PAGE_SIZE-1) ); - if( !MM_Allocate( (tVAddr)pg ) ) + if( !MM_Allocate( pg ) ) { // OOM? return ; diff --git a/KernelLand/Kernel/vfs/mmap.c b/KernelLand/Kernel/vfs/mmap.c index cbfce43b..3539f37c 100644 --- a/KernelLand/Kernel/vfs/mmap.c +++ b/KernelLand/Kernel/vfs/mmap.c @@ -22,11 +22,14 @@ struct sVFS_MMapPageBlock tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK]; }; +// === PROTOTYPES === +//void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset); +void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask); + // === CODE === void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset) { - tVFS_Handle *h; - tVAddr mapping_dest, mapping_base; + tVAddr mapping_base; int npages, pagenum; tVFS_MMapPageBlock *pb, *prev; @@ -39,41 +42,24 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, pagenum = Offset / PAGE_SIZE; mapping_base = (tVAddr)DestHint; - mapping_dest = mapping_base & ~(PAGE_SIZE-1); + tPage *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1)); - // TODO: Locate space for the allocation + if( DestHint == NULL ) + { + // TODO: Locate space for the allocation + LEAVE('n'); + return NULL; + } // Handle anonymous mappings if( Flags & MMAP_MAP_ANONYMOUS ) { - size_t ofs = 0; - LOG("%i pages anonymous to %p", npages, mapping_dest); - for( ; npages --; mapping_dest += PAGE_SIZE, ofs += PAGE_SIZE ) - { - if( MM_GetPhysAddr((void*)mapping_dest) ) { - // TODO: Set flags to COW if needed (well, if shared) - MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW); - LOG("clear from %p, %i bytes", (void*)(mapping_base + ofs), - PAGE_SIZE - (mapping_base & (PAGE_SIZE-1)) - ); - memset( (void*)(mapping_base + ofs), 0, PAGE_SIZE - (mapping_base & (PAGE_SIZE-1))); - LOG("dune"); - } - else { - LOG("New empty page"); - // TODO: Map a COW zero page instead - if( !MM_Allocate(mapping_dest) ) { - // TODO: Error - Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest); - } - memset((void*)mapping_dest, 0, PAGE_SIZE); - LOG("Anon map to %p", mapping_dest); - } - } - LEAVE_RET('p', (void*)mapping_base); + // TODO: Comvert \a Protection into a flag set + void *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0); + LEAVE_RET('p', ret); } - h = VFS_GetHandle(FD); + tVFS_Handle *h = VFS_GetHandle(FD); if( !h || !h->Node ) LEAVE_RET('n', NULL); LOG("h = %p", h); @@ -111,7 +97,7 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, // - Map (and allocate) pages while( npages -- ) { - if( MM_GetPhysAddr( (void*)mapping_dest ) == 0 ) + if( 
MM_GetPhysAddr( mapping_dest ) == 0 ) { if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 ) { @@ -121,7 +107,7 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, // TODO: error } else if( nt->MMap ) - nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest); + nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest); else { int read_len; @@ -134,13 +120,13 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, } // TODO: Clip read length read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, - (void*)mapping_dest, 0); + mapping_dest, 0); // TODO: This was commented out, why? if( read_len != PAGE_SIZE ) { - memset( (void*)(mapping_dest+read_len), 0, PAGE_SIZE-read_len ); + memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len ); } } - pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( (void*)mapping_dest ); + pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest ); MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node ); MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] ); LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest, @@ -181,7 +167,7 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, if( Flags & MMAP_MAP_PRIVATE ) MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW); pagenum ++; - mapping_dest += PAGE_SIZE; + mapping_dest ++; // Roll on to next block if needed if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK) @@ -206,6 +192,74 @@ void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, return (void*)mapping_base; } +void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask) +{ + size_t ofs = (tVAddr)Destination & (PAGE_SIZE-1); + tPage *mapping_dest = (void*)( (char*)Destination - ofs ); + + if( ofs > 0 ) + { + size_t bytes = MIN(PAGE_SIZE - ofs, Length); + + // Allocate a partial page + if( MM_GetPhysAddr(mapping_dest) ) + { + // Already allocated page, clear the area we're touching + ASSERT( ofs + bytes <= PAGE_SIZE ); + + // TODO: Double check that this area isn't already zero + memset( Destination, 0, bytes ); + + MM_SetFlags(mapping_dest, FlagsSet, FlagsMask); + + LOG("#1: Clear %i from %p", Length, Destination); + } + else + { + MM_AllocateZero(mapping_dest); + LOG("#1: Allocate for %p", Destination); + } + mapping_dest ++; + Length -= bytes; + } + while( Length >= PAGE_SIZE ) + { + if( MM_GetPhysAddr( mapping_dest ) ) + { + // We're allocating entire pages here, so free this page and replace with a COW zero + MM_Deallocate(mapping_dest); + LOG("Replace %p with zero page", mapping_dest); + } + else + { + LOG("Allocate zero at %p", mapping_dest); + } + MM_AllocateZero(mapping_dest); + + mapping_dest ++; + Length -= PAGE_SIZE; + } + if( Length > 0 ) + { + ASSERT(Length < PAGE_SIZE); + + // Tail page + if( MM_GetPhysAddr(mapping_dest) ) + { + // TODO: Don't touch page if already zero + memset( mapping_dest, 0, Length ); + LOG("Clear %i in %p", Length, mapping_dest); + } + else + { + MM_AllocateZero(mapping_dest); + LOG("Anon map to %p", mapping_dest); + } + } + + return Destination; +} + int VFS_MUnmap(void *Addr, size_t Length) { return 0; diff --git a/KernelLand/Modules/Display/BochsGA/bochsvbe.c b/KernelLand/Modules/Display/BochsGA/bochsvbe.c index 4eb9c66d..a6338ae1 100644 --- a/KernelLand/Modules/Display/BochsGA/bochsvbe.c +++ b/KernelLand/Modules/Display/BochsGA/bochsvbe.c @@ -140,7 +140,7 @@ int BGA_Install(char **Arguments) void BGA_Uninstall(void) 
{ DevFS_DelDevice( &gBGA_DriverStruct ); - MM_UnmapHWPages( (tVAddr)gBGA_Framebuffer, 768 ); + MM_UnmapHWPages( gBGA_Framebuffer, 768 ); } /** diff --git a/KernelLand/Modules/Display/VESA/main.c b/KernelLand/Modules/Display/VESA/main.c index 86079c67..32022efa 100644 --- a/KernelLand/Modules/Display/VESA/main.c +++ b/KernelLand/Modules/Display/VESA/main.c @@ -433,16 +433,10 @@ int Vesa_Int_SetMode(int mode) // Map Framebuffer if( gpVesaCurMode ) { - if( gpVesaCurMode->framebuffer < 1024*1024 ) - ; - else - MM_UnmapHWPages((tVAddr)gpVesa_Framebuffer, giVesaPageCount); + MM_UnmapHWPages(gpVesa_Framebuffer, giVesaPageCount); } giVesaPageCount = (modeptr->fbSize + 0xFFF) >> 12; - if( modeptr->framebuffer < 1024*1024 ) - gpVesa_Framebuffer = (void*)(KERNEL_BASE|modeptr->framebuffer); - else - gpVesa_Framebuffer = (void*)MM_MapHWPages(modeptr->framebuffer, giVesaPageCount); + gpVesa_Framebuffer = MM_MapHWPages(modeptr->framebuffer, giVesaPageCount); Log_Log("VBE", "Setting mode to %i 0x%x (%ix%i %ibpp) %p[0x%x] maps %P", mode, modeptr->code, diff --git a/KernelLand/Modules/Network/PCnetFAST3/pcnet-fast3.c b/KernelLand/Modules/Network/PCnetFAST3/pcnet-fast3.c index 0e4c6a0f..2a586511 100644 --- a/KernelLand/Modules/Network/PCnetFAST3/pcnet-fast3.c +++ b/KernelLand/Modules/Network/PCnetFAST3/pcnet-fast3.c @@ -153,8 +153,9 @@ int PCnet3_Install(char **Options) i ++; } - if( gpPCnet3_InitBlock != &gPCnet3_StaticInitBlock ) { - MM_UnmapHWPages( (tVAddr)gpPCnet3_InitBlock, 1 ); + if( gpPCnet3_InitBlock != &gPCnet3_StaticInitBlock ) + { + MM_UnmapHWPages( gpPCnet3_InitBlock, 1 ); } return MODULE_ERR_OK; diff --git a/KernelLand/Modules/USB/EHCI/ehci.c b/KernelLand/Modules/USB/EHCI/ehci.c index e7de9a1b..d202d067 100644 --- a/KernelLand/Modules/USB/EHCI/ehci.c +++ b/KernelLand/Modules/USB/EHCI/ehci.c @@ -258,11 +258,11 @@ int EHCI_InitController(tPAddr BaseAddress, Uint8 InterruptNum) _error: cont->PhysBase = 0; if( cont->CapRegs ) - MM_Deallocate( (tVAddr)cont->CapRegs ); + MM_Deallocate( cont->CapRegs ); if( cont->PeriodicQueue ) - MM_Deallocate( (tVAddr)cont->PeriodicQueue ); + MM_Deallocate( cont->PeriodicQueue ); if( cont->TDPool ) - MM_Deallocate( (tVAddr)cont->TDPool ); + MM_Deallocate( cont->TDPool ); LEAVE('i', 2); return 2; } -- 2.20.1
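
The central change above is that MM_Allocate(), MM_Map(), MM_Deallocate(), MM_SetFlags() and related calls now take void pointers, and page-granular addresses are stepped with the new tPage type from include/acess.h rather than with explicit "<< 12" byte offsets. A minimal standalone sketch of that idiom follows; it is not kernel code, and PAGE_SIZE is assumed to be 4096 as on the x86 target.

/* Page-granular pointer arithmetic with the tPage type added to acess.h.
 * Because sizeof(tPage) == PAGE_SIZE, stepping a tPage pointer by one moves
 * exactly one page, matching the old "address + (i << 12)" style. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096
typedef struct { char _[PAGE_SIZE]; } tPage;	/* one array element == one page */

int main(void)
{
	static tPage	stack[4];	/* stands in for a region such as a kernel stack */
	char	*base = (char*)stack;

	for( unsigned i = 0; i < 4; i ++ )
	{
		/* Old style: VAddr + (i << 12); new style: &stack[i] (or stack + i) */
		assert( (char*)&stack[i] == base + (i << 12) );
		printf("page %u at %p\n", i, (void*)&stack[i]);
	}
	return 0;
}

This equivalence is why loops such as MM_NewKStack(), Heap_Extend() and the VM8086 page handling can switch from byte offsets to pointer increments without changing which addresses they touch.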
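
Call sites now pass a pointer and must check the (warn_unused_result) return of MM_Allocate(), as in the patched loops in Heap_Extend(), Proc_MakeUserStack() and vfs/handle.c. The sketch below shows only the loop shape; MM_Allocate() here is a trivial stand-in that fakes an out-of-memory condition so the example can be compiled and run on its own, it is not the kernel's implementation.

/* Shape of an MM_Allocate() loop after the API change. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
typedef struct { char _[PAGE_SIZE]; } tPage;
typedef uint32_t	tPAddr;

static tPAddr	next_phys = 0x1000;

static tPAddr MM_Allocate(volatile void *VAddr)	/* stub, not the kernel's */
{
	(void)VAddr;
	if( next_phys >= 0x1000 + 6*PAGE_SIZE )
		return 0;	/* pretend physical memory ran out */
	tPAddr ret = next_phys;
	next_phys += PAGE_SIZE;
	return ret;
}

/* Mirrors the patched Heap_Extend()/Proc_MakeUserStack() allocation loops */
static int AllocateRange(tPage *base, unsigned npages)
{
	for( unsigned i = 0; i < npages; i ++ )
	{
		if( MM_Allocate(base + i) == 0 )
		{
			fprintf(stderr, "OOM after %u pages\n", i);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	static tPage	region[8];
	printf("AllocateRange(8) -> %i\n", AllocateRange(region, 8));
	return 0;
}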
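
MM_AllocateZero() initialises the shared zero page with a check, lock, re-check sequence so two racing callers cannot both allocate it, and the new TODO block in MM_RefPhys() describes the same window (two callers both seeing "unallocated", with the second clobbering the first's population) that that function does not yet close. The pattern in isolation, with a pthread mutex standing in for tShortSpinlock and a stub standing in for the physical allocator (both substitutions are only for this sketch):

/* Check / lock / re-check initialisation of a shared resource, as used for
 * giMM_ZeroPage in MM_AllocateZero(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t	glZeroPage = PTHREAD_MUTEX_INITIALIZER;
static unsigned long	giZeroPage;	/* 0 == not yet allocated */

static unsigned long AllocPhys(void) { return 0x2000; }	/* stub */

static unsigned long GetZeroPage(void)
{
	if( giZeroPage == 0 )	/* unlocked fast path */
	{
		pthread_mutex_lock(&glZeroPage);
		/* Re-check inside the lock, in case another caller won the race */
		if( giZeroPage == 0 )
			giZeroPage = AllocPhys();
		pthread_mutex_unlock(&glZeroPage);
	}
	return giZeroPage;
}

int main(void)
{
	printf("zero page at %#lx\n", GetZeroPage());
	return 0;
}

Note that the patch also takes a second MM_RefPhys() on giMM_ZeroPage so the shared page can never be freed even when every mapping of it is later deallocated.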
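
The rewritten anonymous-mapping path, VFS_MMap_Anon(), handles three cases: a partial head page, whole pages that become copy-on-write zero pages, and a partial tail page. The split itself is plain arithmetic; a standalone sketch of just that calculation (the real function additionally chooses between memset() of an existing page and MM_AllocateZero() of a missing one):

/* Head / whole-page / tail split used by VFS_MMap_Anon(), arithmetic only. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
#define MIN(a,b)	((a) < (b) ? (a) : (b))

static void SplitAnon(uintptr_t dest, size_t length)
{
	size_t	ofs   = dest & (PAGE_SIZE-1);
	size_t	head  = ofs ? MIN(PAGE_SIZE - ofs, length) : 0;
	size_t	rest  = length - head;
	size_t	whole = rest / PAGE_SIZE;
	size_t	tail  = rest % PAGE_SIZE;
	printf("dest=%#lx len=%zu -> head=%zu bytes, %zu whole pages, tail=%zu bytes\n",
		(unsigned long)dest, length, head, whole, tail);
}

int main(void)
{
	SplitAnon(0x8000, 3*PAGE_SIZE);	/* aligned: whole pages only */
	SplitAnon(0x8234, 3*PAGE_SIZE);	/* unaligned: head, whole pages, tail */
	return 0;
}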