CPPFLAGS += -D KERNEL_VERSION=$(KERNEL_VERSION) -ffreestanding
CFLAGS += -Wall -fno-stack-protector -Wstrict-prototypes -std=gnu99 -g -fno-omit-frame-pointer
CFLAGS += -Wshadow -Wpointer-arith -Wcast-align -Wwrite-strings -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs -Winline -Wuninitialized
+CFLAGS += -Werror
CFLAGS += -O3
LDFLAGS += -T arch/$(ARCHDIR)/link.ld -g
LIBGCC_PATH := $(shell $(CC) -print-libgcc-file-name)
void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length)
{
- if( PhysicalAddress < ONEMEG )
+ if( PhysicalAddress < ONEMEG ) {
+ ASSERTCR(Length, <=, ONEMEG-PhysicalAddress, NULL);
return (void*)(KERNEL_BASE | PhysicalAddress);
+ }
Uint ofs = PhysicalAddress & (PAGE_SIZE-1);
int npages = (ofs + Length + (PAGE_SIZE-1)) / PAGE_SIZE;
- char *maploc = (void*)MM_MapHWPages(PhysicalAddress, npages);
+ char *maploc = MM_MapHWPages(PhysicalAddress, npages);
if(!maploc) {
LOG("Mapping %P+0x%x failed", PhysicalAddress, Length);
return NULL;
void AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Length)
{
- if( (tVAddr)LogicalAddress - KERNEL_BASE < ONEMEG )
+ if( (tVAddr)LogicalAddress - KERNEL_BASE < ONEMEG ) {
+ // Addresses in the low megabyte were returned by AcpiOsMapMemory as
+ // KERNEL_BASE|phys without creating a dynamic mapping - nothing to free
 return ;
+ }
 LOG("%p", LogicalAddress);
 Uint ofs = (tVAddr)LogicalAddress & (PAGE_SIZE-1);
 int npages = (ofs + Length + (PAGE_SIZE-1)) / PAGE_SIZE;
 // TODO: Validate `Length` is the same as was passed to AcpiOsMapMemory
- MM_UnmapHWPages( (tVAddr)LogicalAddress, npages);
+ MM_UnmapHWPages( LogicalAddress, npages);
}
ACPI_STATUS AcpiOsGetPhysicalAddress(void *LogicalAddress, ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
*/
extern void MM_ClearSpace(Uint32 CR3);
+/**
+ * \brief Print a backtrace using the supplied IP/BP
+ */
+void Error_Backtrace(Uint EIP, Uint EBP);
+
#endif
*/
void KernelPanic_SetMode(void)
{
- int i;
-
__asm__ __volatile__ ("cli"); // Stop the processor!
// This function is called by Panic(), but MM_PageFault and the
// CPU exception handers also call it, so let's not clear the screen
// twice
if( giKP_Pos ) return ;
+
+ #if USE_MP
+ // Send halt to all processors
+ for( int i = 0; i < giNumCPUs; i ++ )
+ {
+ if(i == GetCPUNum()) continue ;
+ FB[i] = BGC|('A'+i);
+ MP_SendIPIVector(i, 0xED);
+ }
+ #endif
+ #if ENABLE_KPANIC_MODE
// Restore VGA 0xB8000 text mode
#if 0
- for( i = 0; i < NUM_REGVALUES; i++ )
+ for( int i = 0; i < NUM_REGVALUES; i++ )
{
// Reset Flip-Flop
if( caRegValues[i].IdxPort == 0x3C0 ) inb(0x3DA);
inb(0x3DA);
outb(0x3C0, 0x20);
#endif
-
- #if USE_MP
- // Send halt to all processors
- for( i = 0; i < giNumCPUs; i ++ )
- {
- if(i == GetCPUNum()) continue ;
- FB[i] = BGC|('A'+i);
- MP_SendIPIVector(i, 0xED);
- }
- #endif
- #if ENABLE_KPANIC_MODE
// Clear Screen
- for( i = 0; i < 80*25; i++ )
+ for( int i = 0; i < 80*25; i++ )
{
FB[i] = BGC;
}
{
if( MM_GetPhysAddr( &gaPageReferences[PAddr] ) == 0 )
{
- int i, base;
- tVAddr addr = ((tVAddr)&gaPageReferences[PAddr]) & ~0xFFF;
-// Log_Debug("PMem", "MM_RefPhys: Allocating info for %X", PAddr);
+ Uint base = PAddr & ~(1024-1);
Mutex_Release( &glPhysAlloc );
- if( MM_Allocate( addr ) == 0 ) {
+ // No infinite recursion, AllocPhys doesn't need the reference array
+ // TODO: Race condition? (racy on populating)
+ if( MM_Allocate( &gaPageReferences[base] ) == 0 )
+ {
Log_KernelPanic("PMem",
"MM_RefPhys: Out of physical memory allocating info for %X",
PAddr*PAGE_SIZE
);
+ for(;;);
}
Mutex_Acquire( &glPhysAlloc );
+ // TODO: Solve race condition. (see below)
+ // [1] See unallocated
+ // Release lock
+ // [2] Acquire lock
+ // See unallocated
+ // Release lock
+ // Allocate
+ // [1] Allocate
+ // Acquire lock
+ // Populate
+ // Release lock
+ // [2] Acquire lock
+ // Populate (clobbering)
- base = PAddr & ~(1024-1);
- for( i = 0; i < 1024; i ++ ) {
+ // Fill references from allocated bitmap
+ for( int i = 0; i < 1024; i ++ )
+ {
gaPageReferences[base + i] = (gaPageBitmap[(base+i)/32] & (1 << (base+i)%32)) ? 1 : 0;
}
}
int MM_SetPageNode(tPAddr PAddr, void *Node)
{
- tVAddr block_addr;
-
if( MM_GetRefCount(PAddr) == 0 ) return 1;
PAddr /= PAGE_SIZE;
- block_addr = (tVAddr) &gaPageNodes[PAddr];
- block_addr &= ~(PAGE_SIZE-1);
+ void *page_ptr = (void*)( (tVAddr)&gaPageNodes[PAddr] & ~(PAGE_SIZE-1) );
- if( !MM_GetPhysAddr( (void*)block_addr ) )
+ if( !MM_GetPhysAddr( page_ptr ) )
{
- if( !MM_Allocate( block_addr ) ) {
+ if( !MM_Allocate( page_ptr ) ) {
Log_Warning("PMem", "Unable to allocate Node page");
return -1;
}
- memset( (void*)block_addr, 0, PAGE_SIZE );
+ memset( page_ptr, 0, PAGE_SIZE );
}
gaPageNodes[PAddr] = Node;
#include <arch_int.h>
#include <semaphore.h>
+#define TRACE_MAPS 0
+
#define TAB 22
#define WORKER_STACKS 0x00100000 // Thread0 Only!
typedef Uint32 tTabEnt;
// === IMPORTS ===
-extern char _UsertextEnd[], _UsertextBase[];
+extern tPage _UsertextEnd;
+extern tPage _UsertextBase;
extern Uint32 gaInitPageDir[1024];
extern Uint32 gaInitPageTable[1024];
extern void Threads_SegFault(tVAddr Addr);
-extern void Error_Backtrace(Uint eip, Uint ebp);
// === PROTOTYPES ===
void MM_PreinitVirtual(void);
int Length;
int Flags;
} *gaMappedRegions; // sizeof = 24 bytes
+// - Zero page
+tShortSpinlock glMM_ZeroPage;
+tPAddr giMM_ZeroPage;
// === CODE ===
/**
}
// Unset kernel on the User Text pages
- for( int i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
- MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
+ ASSERT( ((tVAddr)&_UsertextBase & (PAGE_SIZE-1)) == 0 );
+ //ASSERT( ((tVAddr)&_UsertextEnd & (PAGE_SIZE-1)) == 0 );
+ for( tPage *page = &_UsertextBase; page < &_UsertextEnd; page ++ )
+ {
+ MM_SetFlags( page, 0, MM_PFLAG_KERNEL );
}
*gpTmpCR3 = 0;
/**
 * \fn tPAddr MM_Allocate(tVAddr VAddr)
 */
-tPAddr MM_Allocate(tVAddr VAddr)
+/**
+ * \brief Allocate a fresh physical page and map it at \a VAddr
+ * \param VAddr Page-aligned virtual address to populate
+ * \return Physical address allocated, or 0 on failure (OOM or overwrite)
+ */
+tPAddr MM_Allocate(volatile void * VAddr)
{
- tPAddr paddr;
- //ENTER("xVAddr", VAddr);
- //__ASM__("xchg %bx,%bx");
- // Check if the directory is mapped
- if( gaPageDir[ VAddr >> 22 ] == 0 )
- {
- // Allocate directory
- paddr = MM_AllocPhys();
- if( paddr == 0 ) {
- Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
- //LEAVE('i',0);
- return 0;
- }
- // Map and mark as user (if needed)
- gaPageDir[ VAddr >> 22 ] = paddr | 3;
- if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
-
- INVLPG( &gaPageDir[ VAddr >> 22 ] );
- memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
+ tPAddr paddr = MM_AllocPhys();
+ if( paddr == 0 ) {
+ // Out of physical memory - bail out before MM_Map would map physical
+ // page zero (MM_AllocPhys uses 0 as its failure code)
+ Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
+ return 0;
+ }
+ if( MM_Map(VAddr, paddr) ) {
+ return paddr;
}
- // Check if the page is already allocated
- else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
+
+ // MM_Map failed: either an overwrite or OOM for a page table
+ MM_DerefPhys(paddr);
+
+ // Check for overwrite
+ paddr = MM_GetPhysAddr(VAddr);
+ if( paddr != 0 ) {
 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
- //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
- return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
+ return paddr;
}
- // Allocate
- paddr = MM_AllocPhys();
- //LOG("paddr = 0x%llx", paddr);
- if( paddr == 0 ) {
- Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
- VAddr, __builtin_return_address(0));
- //LEAVE('i',0);
- return 0;
- }
- // Map
- gaPageTable[ VAddr >> 12 ] = paddr | 3;
- // Mark as user
- if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
- // Invalidate Cache for address
- INVLPG( VAddr & ~0xFFF );
-
- //LEAVE('X', paddr);
- return paddr;
+ // OOM inside MM_Map (no memory for a new page table)
+ Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
+ return 0;
}
-/**
- * \fn void MM_Deallocate(tVAddr VAddr)
- */
-void MM_Deallocate(tVAddr VAddr)
+/**
+ * \brief Map a copy-on-write reference to the shared zero page at \a VAddr
+ * \param VAddr Page-aligned virtual address to populate
+ */
+void MM_AllocateZero(volatile void *VAddr)
{
- if( gaPageDir[ VAddr >> 22 ] == 0 ) {
- Warning("MM_Deallocate - Directory not mapped");
- return;
+ if( MM_GetPhysAddr(VAddr) ) {
+ Warning("MM_AllocateZero - Attempted overwrite at %p", VAddr);
+ return ;
}
-
- if(gaPageTable[ VAddr >> 12 ] == 0) {
- Warning("MM_Deallocate - Page is not allocated");
- return;
+ if( !giMM_ZeroPage )
+ {
+ SHORTLOCK(&glMM_ZeroPage);
+ // Check again within the lock (just in case we lost the race)
+ if( giMM_ZeroPage == 0 )
+ {
+ giMM_ZeroPage = MM_Allocate(VAddr);
+ if( giMM_ZeroPage == 0 ) {
+ SHORTREL(&glMM_ZeroPage);
+ Warning("MM_AllocateZero - Out of memory");
+ return ;
+ }
+ // - Reference a second time to prevent it from being freed
+ MM_RefPhys(giMM_ZeroPage);
+ memset((void*)VAddr, 0, PAGE_SIZE);
+ SHORTREL(&glMM_ZeroPage);
+ }
+ else
+ {
+ // Lost the race: another thread populated the zero page, but this
+ // address still has no mapping - map the shared page here too
+ SHORTREL(&glMM_ZeroPage);
+ MM_Map(VAddr, giMM_ZeroPage);
+ }
}
-
- // Dereference page
- MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
- // Clear page
- gaPageTable[ VAddr >> 12 ] = 0;
-}
-
-/**
- * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
- * \brief Checks if the passed address is accesable
- */
-tPAddr MM_GetPhysAddr(volatile const void *Addr)
-{
- tVAddr addr = (tVAddr)Addr;
- if( !(gaPageDir[addr >> 22] & 1) )
- return 0;
- if( !(gaPageTable[addr >> 12] & 1) )
- return 0;
- return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF);
-}
-
-/**
- * \fn void MM_SetCR3(Uint CR3)
- * \brief Sets the current process space
- */
-void MM_SetCR3(Uint CR3)
-{
- __ASM__("mov %0, %%cr3"::"r"(CR3));
+ else
+ {
+ MM_Map(VAddr, giMM_ZeroPage);
+ }
+ MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
}
/**
 * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
 * \brief Map a physical page to a virtual one
 */
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
{
- //ENTER("xVAddr xPAddr", VAddr, PAddr);
+ // 4KiB page number within the flat (4MiB-window) page table
+ Uint pagenum = (tVAddr)VAddr >> 12;
+
+ #if TRACE_MAPS
+ Debug("MM_Map(%p, %P)", VAddr, PAddr);
+ #endif
+
 // Sanity check
- if( PAddr & 0xFFF || VAddr & 0xFFF ) {
+ if( PAddr & 0xFFF || (tVAddr)VAddr & 0xFFF ) {
 Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)",
 PAddr, VAddr);
 //LEAVE('i', 0);
 return 0;
 }
- // Align addresses
- PAddr &= ~0xFFF; VAddr &= ~0xFFF;
-
+ bool is_user = ((tVAddr)VAddr < MM_USER_MAX);
+
+ // NOTE(review): an already-existing directory entry never gains PF_USER
+ // here; it is only set when the directory is first created below
 // Check if the directory is mapped
- if( gaPageDir[ VAddr >> 22 ] == 0 )
+ if( gaPageDir[ pagenum >> 10 ] == 0 )
{
 tPAddr tmp = MM_AllocPhys();
 if( tmp == 0 )
 return 0;
- gaPageDir[ VAddr >> 22 ] = tmp | 3;
+ gaPageDir[ pagenum >> 10 ] = tmp | 3 | (is_user ? PF_USER : 0);
- // Mark as user
- if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
-
- INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
- memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
+ INVLPG( &gaPageTable[ pagenum & ~0x3FF ] );
+ memsetd( &gaPageTable[ pagenum & ~0x3FF ], 0, 1024 );
}
 // Check if the page is already allocated
- else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
+ else if( gaPageTable[ pagenum ] != 0 ) {
 Warning("MM_Map - Allocating to used address");
 //LEAVE('i', 0);
 return 0;
 }
 // Map
- gaPageTable[ VAddr >> 12 ] = PAddr | 3;
- // Mark as user
- if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
-
- //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
- // VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
+ gaPageTable[ pagenum ] = PAddr | 3 | (is_user ? PF_USER : 0);
 // Reference
 MM_RefPhys( PAddr );
- //LOG("INVLPG( 0x%x )", VAddr);
 INVLPG( VAddr );
- //LEAVE('i', 1);
 return 1;
}
+/*
+ * A.k.a MM_Unmap
+ * \brief Unmap and dereference the single page at \a VAddr
+ */
+void MM_Deallocate(volatile void *VAddr)
+{
+ Uint pagenum = (tVAddr)VAddr >> 12;
+ if( gaPageDir[pagenum>>10] == 0 ) {
+ Warning("MM_Deallocate - Directory not mapped");
+ return;
+ }
+
+ if(gaPageTable[pagenum] == 0) {
+ Warning("MM_Deallocate - Page is not allocated");
+ return;
+ }
+
+ // Dereference and clear page
+ tPAddr paddr = gaPageTable[pagenum] & ~0xFFF;
+ gaPageTable[pagenum] = 0;
+ // Flush the now-stale TLB entry before the frame can be reused
+ // (matches the behaviour of MM_UnmapHWPages)
+ INVLPG( VAddr );
+ MM_DerefPhys( paddr );
+}
+
+/**
+ * \fn tPAddr MM_GetPhysAddr(volatile const void *Addr)
+ * \brief Checks if the passed address is accessible
+ * \return Physical address (offset within the page preserved),
+ *         or 0 if the address is not mapped
+ */
+tPAddr MM_GetPhysAddr(volatile const void *Addr)
+{
+ tVAddr addr = (tVAddr)Addr;
+ if( !(gaPageDir[addr >> 22] & 1) )
+ return 0;
+ if( !(gaPageTable[addr >> 12] & 1) )
+ return 0;
+ return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF);
+}
+
+/**
+ * \fn void MM_SetCR3(Uint CR3)
+ * \brief Sets the current process space
+ * \param CR3 Physical address of the page directory to load
+ */
+void MM_SetCR3(Uint CR3)
+{
+ __ASM__("mov %0, %%cr3"::"r"(CR3));
+}
+
/**
* \brief Clear user's address space
*/
*/
tVAddr MM_NewKStack(void)
{
- tVAddr base;
- Uint i;
- for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
+ for(tVAddr base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
{
+ tPage *pageptr = (void*)base;
// Check if space is free
- if(MM_GetPhysAddr( (void*) base) != 0)
+ if(MM_GetPhysAddr(pageptr) != 0)
continue;
// Allocate
- //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
- for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
+ for(Uint i = 0; i < MM_KERNEL_STACK_SIZE/PAGE_SIZE; i ++ )
{
- if( MM_Allocate(base+i) == 0 )
+ if( MM_Allocate(pageptr + i) == 0 )
{
// On error, print a warning and return error
Warning("MM_NewKStack - Out of memory");
* \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
* \brief Sets the flags on a page
*/
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
{
- tTabEnt *ent;
- if( !(gaPageDir[VAddr >> 22] & 1) ) return ;
- if( !(gaPageTable[VAddr >> 12] & 1) ) return ;
+ Uint pagenum = (tVAddr)VAddr >> 12;
+ if( !(gaPageDir[pagenum >> 10] & 1) ) return ;
+ if( !(gaPageTable[pagenum] & 1) ) return ;
- ent = &gaPageTable[VAddr >> 12];
+ tTabEnt *ent = &gaPageTable[pagenum];
// Read-Only
if( Mask & MM_PFLAG_RO )
*ent &= ~PF_WRITE;
}
else {
- gaPageDir[VAddr >> 22] |= PF_WRITE;
+ gaPageDir[pagenum >> 10] |= PF_WRITE;
*ent |= PF_WRITE;
}
}
*ent &= ~PF_USER;
}
else {
- gaPageDir[VAddr >> 22] |= PF_USER;
+ gaPageDir[pagenum >> 10] |= PF_USER;
*ent |= PF_USER;
}
}
/**
* \brief Get the flags on a page
*/
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(volatile const void *VAddr)
{
- tTabEnt *ent;
- Uint ret = 0;
+ Uint pagenum = (tVAddr)VAddr >> 12;
// Validity Check
- if( !(gaPageDir[VAddr >> 22] & 1) ) return 0;
- if( !(gaPageTable[VAddr >> 12] & 1) ) return 0;
+ if( !(gaPageDir[pagenum >> 10] & 1) ) return 0;
+ if( !(gaPageTable[pagenum] & 1) ) return 0;
- ent = &gaPageTable[VAddr >> 12];
+ tTabEnt *ent = &gaPageTable[pagenum];
+ Uint ret = 0;
// Read-Only
if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
// Kernel
* \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
* \brief Unmap a hardware page
*/
-void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(volatile void *Base, Uint Number)
{
- int i, j;
-
+ tVAddr VAddr = (tVAddr)Base;
//Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
//
if( KERNEL_BASE <= VAddr && VAddr < KERNEL_BASE + 1024*1024 )
- return ;
+ return ;
+
+ Uint pagenum = VAddr >> 12;
// Sanity Check
if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX) return;
- i = VAddr >> 12;
Mutex_Acquire( &glTempMappings ); // Temp and HW share a directory, so they share a lock
- for( j = 0; j < Number; j++ )
+ for( Uint i = 0; i < Number; i ++ )
{
- MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
- gaPageTable[ i + j ] = 0;
- INVLPG( (tVAddr)(i+j) << 12 );
+ MM_DerefPhys( gaPageTable[ pagenum + i ] & ~0xFFF );
+ gaPageTable[ pagenum + i ] = 0;
+ INVLPG( (tVAddr)(pagenum + i) << 12 );
}
Mutex_Release( &glTempMappings );
gProcessZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE;
// Create Per-Process Data Block
- if( !MM_Allocate(MM_PPD_CFG) )
+ if( MM_Allocate( (void*)MM_PPD_CFG ) == 0 )
{
Panic("OOM - No space for initial Per-Process Config");
}
*/
Uint Proc_MakeUserStack(void)
{
- int i;
- Uint base = USER_STACK_TOP - USER_STACK_SZ;
+ tPage *base = (void*)(USER_STACK_TOP - USER_STACK_SZ);
// Check Prospective Space
- for( i = USER_STACK_SZ >> 12; i--; )
- if( MM_GetPhysAddr( (void*)(base + (i<<12)) ) != 0 )
- break;
-
- if(i != -1) return 0;
-
+ for( Uint i = USER_STACK_SZ/PAGE_SIZE; i--; )
+ {
+ if( MM_GetPhysAddr( base + i ) != 0 )
+ {
+ Warning("Proc_MakeUserStack: Address %p in use", base + i);
+ return 0;
+ }
+ }
// Allocate Stack - Allocate incrementally to clean up MM_Dump output
- for( i = 0; i < USER_STACK_SZ/0x1000; i++ )
+ for( Uint i = 0; i < USER_STACK_SZ/PAGE_SIZE; i++ )
{
- if( !MM_Allocate( base + (i<<12) ) )
+ if( MM_Allocate( base + i ) == 0 )
{
Warning("OOM: Proc_MakeUserStack");
return 0;
}
}
- return base + USER_STACK_SZ;
+ return (tVAddr)( base + USER_STACK_SZ/PAGE_SIZE );
}
void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, const char **ArgV, int DataSize)
struct sVM8086_InternalPages
{
Uint32 Bitmap; // 32 sections = 128 byte blocks
- tVAddr VirtBase;
+ char *VirtBase;
tPAddr PhysAddr;
};
struct sVM8086_InternalData
// Map ROM Area
for(i=0xA0;i<0x100;i++) {
- MM_Map( i * 0x1000, i * 0x1000 );
+ MM_Map( (void*)(i * 0x1000), i * 0x1000 );
}
- MM_Map( 0, 0 ); // IVT / BDA
+ MM_Map( (void*)0, 0 ); // IVT / BDA
if( MM_GetRefCount(0x00000) > 2 ) {
Log_Notice("VM8086", "Ok, who's touched the IVT? (%i)",
MM_GetRefCount(0x00000));
}
- MM_Map( 0x9F000, 0x9F000 ); // Stack / EBDA
+ MM_Map( (void*)0x9F000, 0x9F000 ); // Stack / EBDA
if( MM_GetRefCount(0x9F000) > 2 ) {
Log_Notice("VM8086", "And who's been playing with my EBDA? (%i)",
MM_GetRefCount(0x9F000));
}
// System Stack / Stub
- if( MM_Allocate( 0x100000 ) == 0 ) {
+ if( MM_Allocate( (void*)0x100000 ) == 0 ) {
Log_Error("VM8086", "Unable to allocate memory for stack/stub");
gVM8086_WorkerPID = 0;
Threads_Exit(0, 1);
if(Regs->eip == VM8086_MAGIC_IP && Regs->cs == VM8086_MAGIC_CS
&& Threads_GetPID() == gVM8086_WorkerPID)
{
- int i;
if( gpVM8086_State == (void*)-1 ) {
Log_Log("VM8086", "Worker thread ready and waiting");
gpVM8086_State = NULL;
}
// Log_Log("VM8086", "gpVM8086_State = %p, gVM8086_CallingThread = %i",
// gpVM8086_State, gVM8086_CallingThread);
- if( gpVM8086_State ) {
+ if( gpVM8086_State )
+ {
gpVM8086_State->AX = Regs->eax; gpVM8086_State->CX = Regs->ecx;
gpVM8086_State->DX = Regs->edx; gpVM8086_State->BX = Regs->ebx;
gpVM8086_State->BP = Regs->ebp;
LOG("gpVM8086_State = %p", gpVM8086_State);
LOG("gpVM8086_State->Internal = %p", gpVM8086_State->Internal);
- for( i = 0; i < VM8086_PAGES_PER_INST; i ++ ) {
+ for( Uint i = 0; i < VM8086_PAGES_PER_INST; i ++ )
+ {
if( !gpVM8086_State->Internal->AllocatedPages[i].VirtBase )
continue ;
- MM_Deallocate( VM8086_USER_BASE + i*PAGE_SIZE );
+ MM_Deallocate( (tPage*)VM8086_USER_BASE + i );
}
gpVM8086_State = NULL;
__asm__ __volatile__ ("sti");
Semaphore_Wait(&gVM8086_TasksToDo, 1);
- for( i = 0; i < VM8086_PAGES_PER_INST; i ++ )
+ for( Uint i = 0; i < VM8086_PAGES_PER_INST; i ++ )
{
if( !gpVM8086_State->Internal->AllocatedPages[i].VirtBase )
continue ;
- MM_Map( VM8086_USER_BASE + i*PAGE_SIZE, gpVM8086_State->Internal->AllocatedPages[i].PhysAddr );
+ MM_Map( (tPage*)VM8086_USER_BASE + i, gpVM8086_State->Internal->AllocatedPages[i].PhysAddr );
}
void VM8086_Free(tVM8086 *State)
{
- int i;
// TODO: Make sure the state isn't in use currently
- for( i = VM8086_PAGES_PER_INST; i --; )
+ for( Uint i = VM8086_PAGES_PER_INST; i --; )
MM_UnmapHWPages( State->Internal->AllocatedPages[i].VirtBase, 1);
free(State);
}
rem = nBlocks;
base = 0;
// Scan the bitmap for a free block
- for( j = 0; j < 32; j++ ) {
- if( pages[i].Bitmap & (1 << j) ) {
+ // - 32 blocks per page == 128 bytes per block == 8 segments
+ for( j = 0; j < 32; j++ )
+ {
+ if( pages[i].Bitmap & (1 << j) )
+ {
base = j+1;
rem = nBlocks;
}
*Offset = 0;
LOG("Allocated at #%i,%04x", i, base*8*16);
LOG(" - %x:%x", *Segment, *Offset);
- return (void*)( pages[i].VirtBase + base * 8 * 16 );
+ return pages[i].VirtBase + base * 8 * 16;
}
}
}
return NULL;
}
- pages[i].VirtBase = (tVAddr)MM_AllocDMA(1, -1, &pages[i].PhysAddr);
+ pages[i].VirtBase = MM_AllocDMA(1, -1, &pages[i].PhysAddr);
if( pages[i].VirtBase == 0 ) {
Log_Warning("VM8086", "Unable to allocate data page");
return NULL;
*Segment = (VM8086_USER_BASE + i * 0x1000) / 16;
*Offset = 0;
LOG(" - %04x:%04x", *Segment, *Offset);
- return (void*) pages[i].VirtBase;
+ return pages[i].VirtBase;
}
void *VM8086_GetPointer(tVM8086 *State, Uint16 Segment, Uint16 Offset)
if( State->Internal->AllocatedPages[pg].VirtBase == 0)
return NULL;
else
- return (Uint8*)State->Internal->AllocatedPages[pg].VirtBase + (addr & 0xFFF);
+ return State->Internal->AllocatedPages[pg].VirtBase + (addr & 0xFFF);
}
else
{
if( mask & 1 ) {\r
if( flag ) {\r
// Re-set RO, clear COW\r
- MM_SetFlags(addr, MM_PFLAG_RO, MM_PFLAG_RO|MM_PFLAG_COW);\r
+ MM_SetFlags((void*)addr, MM_PFLAG_RO, MM_PFLAG_RO|MM_PFLAG_COW);\r
}\r
else {\r
- MM_SetFlags(addr, MM_PFLAG_RO|MM_PFLAG_COW, MM_PFLAG_RO|MM_PFLAG_COW);\r
+ MM_SetFlags((void*)addr, MM_PFLAG_RO|MM_PFLAG_COW, MM_PFLAG_RO|MM_PFLAG_COW);\r
}\r
}\r
return 0;\r
*/
#include <acess.h>
#include <stdarg.h>
+#include <debug_hooks.h>
#define DEBUG_MAX_LINE_LEN 256
#define LOCK_DEBUG_OUTPUT 1 // Avoid interleaving of output lines?
#define TRACE_TO_KTERM 0 // Send ENTER/DEBUG/LEAVE to debug?
// === IMPORTS ===
-extern void Threads_Dump(void);
-extern void Heap_Dump(void);
extern void KernelPanic_SetMode(void);
extern void KernelPanic_PutChar(char Ch);
extern void IPStack_SendDebugText(const char *Text);
+extern void VT_SetTerminal(int TerminalID);
// === PROTOTYPES ===
static void Debug_Putchar(char ch);
* drv/vterm_vt100.c
* - Virtual Terminal - VT100 (Kinda) Emulation
*/
-#define DEBUG 1
+#define DEBUG 0
#include "vterm.h"
#define sTerminal sVTerm
// Heap expands in pages
for( Uint i = 0; i < pages; i ++ )
{
- if( !MM_Allocate( (tVAddr)gHeapEnd+(i<<12) ) )
+ if( !MM_Allocate( (tPage*)gHeapEnd + i ) )
{
Warning("OOM - Heap_Extend (%i bytes)");
Heap_Dump();
typedef struct sShortSpinlock tShortSpinlock; //!< Opaque (kinda) spinlock
typedef int bool; //!< Boolean type
typedef Uint64 off_t; //!< VFS Offset
+typedef struct { char _[PAGE_SIZE];} tPage; // Representation of a page for pointer arithmetic
// --- Helper Macros ---
/**
* \param VAddr Virtual Address to allocate at
* \return Physical address allocated
*/
-extern tPAddr MM_Allocate(tVAddr VAddr) __attribute__ ((warn_unused_result));
+extern tPAddr MM_Allocate(volatile void *VAddr) __attribute__ ((warn_unused_result));
+/**
+ * \brief Allocate a zeroed COW page to \a VAddr
+ * \param VAddr Virtual address to allocate at
+ * \return Physical address allocated (don't cache)
+ */
+extern void MM_AllocateZero(volatile void *VAddr);
/**
* \brief Deallocate a page
* \param VAddr Virtual address to unmap
*/
-extern void MM_Deallocate(tVAddr VAddr);
+extern void MM_Deallocate(volatile void *VAddr);
/**
* \brief Map a physical page at \a PAddr to \a VAddr
* \param VAddr Target virtual address
* \param PAddr Physical address to map
* \return Boolean Success
*/
-extern int MM_Map(tVAddr VAddr, tPAddr PAddr);
+extern int MM_Map(volatile void * VAddr, tPAddr PAddr);
/**
* \brief Get the physical address of \a Addr
* \param Addr Address of the page to get the physical address of
* \param Flags New flags value
* \param Mask Flags to set
*/
-extern void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask);
+extern void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask);
/**
* \brief Get the flags on a flag
* \param VAddr Virtual address of page
* \return Flags value of the page
*/
-extern Uint MM_GetFlags(tVAddr VAddr);
+extern Uint MM_GetFlags(volatile const void *VAddr);
/**
* \brief Checks is a memory range is user accessable
* \param VAddr Base address to check
* \return 1 if the memory is all user-accessable, 0 otherwise
*/
-#define MM_IsUser(VAddr) (!(MM_GetFlags((tVAddr)(VAddr))&MM_PFLAG_KERNEL))
+#define MM_IsUser(VAddr) (!(MM_GetFlags((const void*)(VAddr))&MM_PFLAG_KERNEL))
/**
* \brief Temporarily map a page into the address space
* \param PAddr Physical addres to map
* \param VAddr Virtual address allocate by ::MM_MapHWPages or ::MM_AllocDMA
* \param Number Number of pages to free
*/
-extern void MM_UnmapHWPages(tVAddr VAddr, Uint Number);
+extern void MM_UnmapHWPages(volatile void *VAddr, Uint Number);
/**
* \brief Allocate a single physical page
* \return Physical address allocated
extern void Threads_ToggleTrace(int TID);
extern void Heap_Stats(void);
+extern void Proc_PrintBacktrace(void);
+
#endif
void SyscallHandler(tSyscallRegs *Regs);
int Syscall_ValidString(const char *Addr);
int Syscall_Valid(int Size, const void *Addr);
- int Syscall_MM_SetFlags(const void *Addr, Uint Flags, Uint Mask);
+ int Syscall_MM_SetFlags(void *Addr, Uint Flags, Uint Mask);
// === CODE ===
// TODO: Do sanity checking on arguments, ATM the user can really fuck with the kernel
break;
// -- Map an address
- case SYS_MAP: MM_Map(Regs->Arg1, Regs->Arg2); break;
+ case SYS_MAP: MM_Map((void*)Regs->Arg1, Regs->Arg2); break;
// -- Allocate an address
- case SYS_ALLOCATE: ret = MM_Allocate(Regs->Arg1); break;
+ case SYS_ALLOCATE: ret = MM_Allocate((void*)Regs->Arg1); break;
// -- Unmap an address
- case SYS_UNMAP: MM_Deallocate(Regs->Arg1); break;
+ case SYS_UNMAP: MM_Deallocate((void*)Regs->Arg1); break;
// -- Change the protection on an address
case SYS_SETFLAGS:
return CheckMem( Addr, Size );
}
-int Syscall_MM_SetFlags(const void *Addr, Uint Flags, Uint Mask)
+int Syscall_MM_SetFlags(void *Addr, Uint Flags, Uint Mask)
{
tPAddr paddr = MM_GetPhysAddr(Addr);
Flags &= MM_PFLAG_RO|MM_PFLAG_EXEC;
Mask |= MM_PFLAG_COW;
}
}
- MM_SetFlags((tVAddr)Addr, Flags, Mask);
+ MM_SetFlags(Addr, Flags, Mask);
return 0;
}
// Allocate Buffer
if( MM_GetPhysAddr( gaUserHandles ) == 0 )
{
- Uint addr, size;
- size = max_handles * sizeof(tVFS_Handle);
- for(addr = 0; addr < size; addr += 0x1000)
+ tPage *pageptr = (void*)gaUserHandles;
+ size_t size = max_handles * sizeof(tVFS_Handle);
+ for( size_t ofs = 0; ofs < size; ofs ++)
{
- if( !MM_Allocate( (tVAddr)gaUserHandles + addr ) )
+ if( !MM_Allocate( pageptr ) )
{
Warning("OOM - VFS_AllocHandle");
Threads_Exit(0, 0xFF); // Terminate user
}
+ pageptr ++;
}
memset( gaUserHandles, 0, size );
}
// Allocate space if not already
if( MM_GetPhysAddr( gaKernelHandles ) == 0 )
{
- Uint addr, size;
- size = MAX_KERNEL_FILES * sizeof(tVFS_Handle);
- for(addr = 0; addr < size; addr += 0x1000)
+ tPage *pageptr = (void*)gaKernelHandles;
+ size_t size = MAX_KERNEL_FILES * sizeof(tVFS_Handle);
+ for(size_t ofs = 0; ofs < size; ofs += size)
{
- if( !MM_Allocate( (tVAddr)gaKernelHandles + addr ) )
+ if( !MM_Allocate( pageptr ) )
{
Panic("OOM - VFS_AllocHandle");
- Threads_Exit(0, 0xFF); // Terminate application (get some space back)
}
+ pageptr ++;
}
memset( gaKernelHandles, 0, size );
}
void VFS_CloseAllUserHandles(void)
{
- int i;
int max_handles = *Threads_GetMaxFD();
// Check if this process has any handles
if( MM_GetPhysAddr( gaUserHandles ) == 0 )
return ;
- for( i = 0; i < max_handles; i ++ )
+ for( int i = 0; i < max_handles; i ++ )
{
tVFS_Handle *h;
h = &gaUserHandles[i];
if( !MM_GetPhysAddr(h) )
{
void *pg = (void*)( (tVAddr)h & ~(PAGE_SIZE-1) );
- if( !MM_Allocate( (tVAddr)pg ) )
+ if( !MM_Allocate( pg ) )
{
// OOM?
return ;
tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK];
};
+// === PROTOTYPES ===
+//void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
+void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
+
// === CODE ===
void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
{
- tVFS_Handle *h;
- tVAddr mapping_dest, mapping_base;
+ tVAddr mapping_base;
int npages, pagenum;
tVFS_MMapPageBlock *pb, *prev;
pagenum = Offset / PAGE_SIZE;
mapping_base = (tVAddr)DestHint;
- mapping_dest = mapping_base & ~(PAGE_SIZE-1);
+ tPage *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
- // TODO: Locate space for the allocation
+ if( DestHint == NULL )
+ {
+ // TODO: Locate space for the allocation
+ LEAVE('n');
+ return NULL;
+ }
// Handle anonymous mappings
if( Flags & MMAP_MAP_ANONYMOUS )
{
- size_t ofs = 0;
- LOG("%i pages anonymous to %p", npages, mapping_dest);
- for( ; npages --; mapping_dest += PAGE_SIZE, ofs += PAGE_SIZE )
- {
- if( MM_GetPhysAddr((void*)mapping_dest) ) {
- // TODO: Set flags to COW if needed (well, if shared)
- MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
- LOG("clear from %p, %i bytes", (void*)(mapping_base + ofs),
- PAGE_SIZE - (mapping_base & (PAGE_SIZE-1))
- );
- memset( (void*)(mapping_base + ofs), 0, PAGE_SIZE - (mapping_base & (PAGE_SIZE-1)));
- LOG("dune");
- }
- else {
- LOG("New empty page");
- // TODO: Map a COW zero page instead
- if( !MM_Allocate(mapping_dest) ) {
- // TODO: Error
- Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest);
- }
- memset((void*)mapping_dest, 0, PAGE_SIZE);
- LOG("Anon map to %p", mapping_dest);
- }
- }
- LEAVE_RET('p', (void*)mapping_base);
+ // TODO: Convert \a Protection into a flag set
+ void *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
+ LEAVE_RET('p', ret);
}
- h = VFS_GetHandle(FD);
+ tVFS_Handle *h = VFS_GetHandle(FD);
if( !h || !h->Node ) LEAVE_RET('n', NULL);
LOG("h = %p", h);
// - Map (and allocate) pages
while( npages -- )
{
- if( MM_GetPhysAddr( (void*)mapping_dest ) == 0 )
+ if( MM_GetPhysAddr( mapping_dest ) == 0 )
{
if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
{
// TODO: error
}
else if( nt->MMap )
- nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
+ nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
else
{
int read_len;
}
// TODO: Clip read length
read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE,
- (void*)mapping_dest, 0);
+ mapping_dest, 0);
// TODO: This was commented out, why?
if( read_len != PAGE_SIZE ) {
- memset( (void*)(mapping_dest+read_len), 0, PAGE_SIZE-read_len );
+ memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
}
}
- pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( (void*)mapping_dest );
+ pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
if( Flags & MMAP_MAP_PRIVATE )
MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
pagenum ++;
- mapping_dest += PAGE_SIZE;
+ mapping_dest ++;
// Roll on to next block if needed
if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
return (void*)mapping_base;
}
+/**
+ * \brief Anonymous-mapping backend for VFS_MMap
+ * \param Destination Target address (need not be page aligned)
+ * \param Length Number of bytes to provide as zeroed memory
+ * \param FlagsSet Page flags to apply to an already-mapped head page
+ * \param FlagsMask Mask of page flags to modify on that page
+ * \return \a Destination (no failure is reported to the caller)
+ */
+void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
+{
+ size_t ofs = (tVAddr)Destination & (PAGE_SIZE-1);
+ tPage *mapping_dest = (void*)( (char*)Destination - ofs );
+
+ if( ofs > 0 )
+ {
+ size_t bytes = MIN(PAGE_SIZE - ofs, Length);
+
+ // Allocate a partial page
+ if( MM_GetPhysAddr(mapping_dest) )
+ {
+ // Already allocated page, clear the area we're touching
+ ASSERT( ofs + bytes <= PAGE_SIZE );
+
+ // TODO: Double check that this area isn't already zero
+ memset( Destination, 0, bytes );
+
+ // NOTE(review): FlagsSet/FlagsMask are only applied on this path
+ // (already-mapped head page), never in the whole-page branches
+ // below - confirm this is intended
+ MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
+
+ LOG("#1: Clear %i from %p", Length, Destination);
+ }
+ else
+ {
+ MM_AllocateZero(mapping_dest);
+ LOG("#1: Allocate for %p", Destination);
+ }
+ mapping_dest ++;
+ Length -= bytes;
+ }
+ while( Length >= PAGE_SIZE )
+ {
+ if( MM_GetPhysAddr( mapping_dest ) )
+ {
+ // We're allocating entire pages here, so free this page and replace with a COW zero
+ MM_Deallocate(mapping_dest);
+ LOG("Replace %p with zero page", mapping_dest);
+ }
+ else
+ {
+ LOG("Allocate zero at %p", mapping_dest);
+ }
+ MM_AllocateZero(mapping_dest);
+
+ mapping_dest ++;
+ Length -= PAGE_SIZE;
+ }
+ if( Length > 0 )
+ {
+ ASSERT(Length < PAGE_SIZE);
+
+ // Tail page
+ if( MM_GetPhysAddr(mapping_dest) )
+ {
+ // TODO: Don't touch page if already zero
+ memset( mapping_dest, 0, Length );
+ LOG("Clear %i in %p", Length, mapping_dest);
+ }
+ else
+ {
+ MM_AllocateZero(mapping_dest);
+ LOG("Anon map to %p", mapping_dest);
+ }
+ }
+
+ return Destination;
+}
+
int VFS_MUnmap(void *Addr, size_t Length)
{
return 0;
void BGA_Uninstall(void)\r
{\r
DevFS_DelDevice( &gBGA_DriverStruct );\r
- MM_UnmapHWPages( (tVAddr)gBGA_Framebuffer, 768 );\r
+ MM_UnmapHWPages( gBGA_Framebuffer, 768 );\r
}\r
\r
/**\r
// Map Framebuffer\r
if( gpVesaCurMode )\r
{\r
- if( gpVesaCurMode->framebuffer < 1024*1024 )\r
- ;\r
- else\r
- MM_UnmapHWPages((tVAddr)gpVesa_Framebuffer, giVesaPageCount);\r
+ MM_UnmapHWPages(gpVesa_Framebuffer, giVesaPageCount);\r
}\r
giVesaPageCount = (modeptr->fbSize + 0xFFF) >> 12;\r
- if( modeptr->framebuffer < 1024*1024 )\r
- gpVesa_Framebuffer = (void*)(KERNEL_BASE|modeptr->framebuffer);\r
- else\r
- gpVesa_Framebuffer = (void*)MM_MapHWPages(modeptr->framebuffer, giVesaPageCount);\r
+ gpVesa_Framebuffer = MM_MapHWPages(modeptr->framebuffer, giVesaPageCount);\r
\r
Log_Log("VBE", "Setting mode to %i 0x%x (%ix%i %ibpp) %p[0x%x] maps %P",\r
mode, modeptr->code,\r
i ++;
}
- if( gpPCnet3_InitBlock != &gPCnet3_StaticInitBlock ) {
- MM_UnmapHWPages( (tVAddr)gpPCnet3_InitBlock, 1 );
+ if( gpPCnet3_InitBlock != &gPCnet3_StaticInitBlock )
+ {
+ MM_UnmapHWPages( gpPCnet3_InitBlock, 1 );
}
return MODULE_ERR_OK;
_error:
cont->PhysBase = 0;
if( cont->CapRegs )
- MM_Deallocate( (tVAddr)cont->CapRegs );
+ MM_Deallocate( cont->CapRegs );
if( cont->PeriodicQueue )
- MM_Deallocate( (tVAddr)cont->PeriodicQueue );
+ MM_Deallocate( cont->PeriodicQueue );
if( cont->TDPool )
- MM_Deallocate( (tVAddr)cont->TDPool );
+ MM_Deallocate( cont->TDPool );
LEAVE('i', 2);
return 2;
}