* 0xFE - Unused
* 0xFF - System Calls / Kernel's User Code
*/
-#define DEBUG 1
+#define DEBUG 0
+#define SANITY 1
#include <acess.h>
+#include <mm_virt.h>
#include <mm_phys.h>
#include <proc.h>
+#if USE_PAE
+# define TAB 21
+# define DIR 30
+#else
+# define TAB 22
+#endif
+
#define KERNEL_STACKS 0xF0000000
#define KERNEL_STACK_SIZE 0x00008000
-#define KERNEL_STACKS_END 0xFD000000
+#define KERNEL_STACKS_END 0xFC000000
#define WORKER_STACKS 0x00100000 // Thread0 Only!
#define WORKER_STACK_SIZE KERNEL_STACK_SIZE
#define WORKER_STACKS_END 0xB0000000
#define NUM_WORKER_STACKS ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
-#define PAGE_TABLE_ADDR 0xFD000000
-#define PAGE_DIR_ADDR 0xFD3F4000
-#define PAGE_CR3_ADDR 0xFD3F4FD0
-#define TMP_CR3_ADDR 0xFD3F4FD4 // Part of core instead of temp
-#define TMP_DIR_ADDR 0xFD3F5000 // Same
-#define TMP_TABLE_ADDR 0xFD400000
-#define HW_MAP_ADDR 0xFD800000
-#define HW_MAP_MAX 0xFEFF0000
+
+#define PAE_PAGE_TABLE_ADDR 0xFC000000 // 16 MiB
+#define PAE_PAGE_DIR_ADDR 0xFCFC0000 // 16 KiB
+#define PAE_PAGE_PDPT_ADDR 0xFCFC3F00 // 32 bytes
+#define PAE_TMP_PDPT_ADDR 0xFCFC3F20 // 32 bytes
+#define PAE_TMP_DIR_ADDR 0xFCFE0000 // 16 KiB
+#define PAE_TMP_TABLE_ADDR 0xFD000000 // 16 MiB
+
+#define PAGE_TABLE_ADDR 0xFC000000
+#define PAGE_DIR_ADDR 0xFC3F0000
+#define PAGE_CR3_ADDR 0xFC3F0FC0
+#define TMP_CR3_ADDR 0xFC3F0FC4 // Part of core instead of temp
+#define TMP_DIR_ADDR 0xFC3F1000 // Same
+#define TMP_TABLE_ADDR 0xFC400000
+
+#define HW_MAP_ADDR 0xFE000000
+#define HW_MAP_MAX 0xFFEF0000
#define NUM_HW_PAGES ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
-#define TEMP_MAP_ADDR 0xFEFF0000 // Allows 16 "temp" pages
+#define TEMP_MAP_ADDR 0xFFEF0000 // Allows 16 "temp" pages
#define NUM_TEMP_PAGES 16
+#define LAST_BLOCK_ADDR 0xFFFF0000 // Free space for kernel provided user code/ *(-1) protection
#define PF_PRESENT 0x1
#define PF_WRITE 0x2
#endif
// === IMPORTS ===
+extern void _UsertextEnd, _UsertextBase;
extern Uint32 gaInitPageDir[1024];
extern Uint32 gaInitPageTable[1024];
extern void Threads_SegFault(tVAddr Addr);
extern void Error_Backtrace(Uint eip, Uint ebp);
// === PROTOTYPES ===
-void MM_PreinitVirtual();
-void MM_InstallVirtual();
+void MM_PreinitVirtual(void);
+void MM_InstallVirtual(void);
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void MM_DumpTables(tVAddr Start, tVAddr End);
tPAddr MM_DuplicatePage(tVAddr VAddr);
// === GLOBALS ===
#define gaPageTable ((tTabEnt*)PAGE_TABLE_ADDR)
#define gaPageDir ((tTabEnt*)PAGE_DIR_ADDR)
-#define gaPageCR3 ((tTabEnt*)PAGE_CR3_ADDR)
#define gaTmpTable ((tTabEnt*)TMP_TABLE_ADDR)
#define gaTmpDir ((tTabEnt*)TMP_DIR_ADDR)
-#define gTmpCR3 ((tTabEnt*)TMP_CR3_ADDR)
-//tPAddr *gaPageTable = (void*)PAGE_TABLE_ADDR;
-//tPAddr *gaPageDir = (void*)PAGE_DIR_ADDR;
-//tPAddr *gaPageCR3 = (void*)PAGE_CR3_ADDR;
-//tPAddr *gaTmpTable = (void*)TMP_TABLE_ADDR;
-//tPAddr *gaTmpDir = (void*)TMP_DIR_ADDR;
-//tPAddr *gTmpCR3 = (void*)TMP_CR3_ADDR;
- int gilTempMappings = 0;
- int gilTempFractal = 0;
+#define gpPageCR3 ((tTabEnt*)PAGE_CR3_ADDR)
+#define gpTmpCR3 ((tTabEnt*)TMP_CR3_ADDR)
+
+#define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
+#define gaPAE_PageDir ((tTabEnt*)PAE_PAGE_DIR_ADDR)
+#define gaPAE_MainPDPT ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
+#define gaPAE_TmpTable	((tTabEnt*)PAE_TMP_TABLE_ADDR)
+#define gaPAE_TmpDir ((tTabEnt*)PAE_TMP_DIR_ADDR)
+#define gaPAE_TmpPDPT ((tTabEnt*)PAE_TMP_PDPT_ADDR)
+ int gbUsePAE = 0;
+tMutex glTempMappings;
+tMutex glTempFractal;
Uint32 gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
int giLastUsedWorker = 0;
// === CODE ===
/**
- * \fn void MM_PreinitVirtual()
+ * \fn void MM_PreinitVirtual(void)
* \brief Maps the fractal mappings
*/
-void MM_PreinitVirtual()
+void MM_PreinitVirtual(void)
{
- gaInitPageDir[ 0 ] = 0;
- gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((Uint)&gaInitPageDir - KERNEL_BASE) | 3;
+ #if USE_PAE
+ gaInitPageDir[ ((PAGE_TABLE_ADDR >> TAB)-3*512+3)*2 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
+ #else
+ gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
+ #endif
INVLPG( PAGE_TABLE_ADDR );
}
/**
- * \fn void MM_InstallVirtual()
+ * \fn void MM_InstallVirtual(void)
* \brief Sets up the constant page mappings
*/
-void MM_InstallVirtual()
+void MM_InstallVirtual(void)
{
int i;
+ #if USE_PAE
+ // --- Pre-Allocate kernel tables
+ for( i = KERNEL_BASE >> TAB; i < 1024*4; i ++ )
+ {
+ if( gaPAE_PageDir[ i ] ) continue;
+
+ // Skip stack tables, they are process unique
+		if( i >= KERNEL_STACKS >> TAB && i < KERNEL_STACKS_END >> TAB) {
+ gaPAE_PageDir[ i ] = 0;
+ continue;
+ }
+ // Preallocate table
+ gaPAE_PageDir[ i ] = MM_AllocPhys() | 3;
+ INVLPG( &gaPAE_PageTable[i*512] );
+ memset( &gaPAE_PageTable[i*512], 0, 0x1000 );
+ }
+ #else
// --- Pre-Allocate kernel tables
for( i = KERNEL_BASE>>22; i < 1024; i ++ )
{
INVLPG( &gaPageTable[i*1024] );
memset( &gaPageTable[i*1024], 0, 0x1000 );
}
+ #endif
+
+ // Unset kernel on the User Text pages
+ for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
+ MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
+ }
+}
+
+/**
+ * \brief Cleans up the SMP required mappings
+ */
+void MM_FinishVirtualInit(void)
+{
+ #if USE_PAE
+ gaInitPDPT[ 0 ] = 0;
+ #else
+ gaInitPageDir[ 0 ] = 0;
+ #endif
}
/**
}
else
{
+ //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
paddr = MM_DuplicatePage( Addr );
MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
gaPageTable[Addr>>12] &= PF_USER;
(ErrorCode&1?"bad/locked":"non-present"),
(ErrorCode&16?" (Instruction Fetch)":"")
);
- Warning("User Pagefault: Instruction at %p accessed %p", Regs->eip, Addr);
+ Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
__asm__ __volatile__ ("sti"); // Restart IRQs
Threads_SegFault(Addr);
return ;
}
+ Debug_KernelPanic();
+
// -- Check Error Code --
if(ErrorCode & 8)
Warning("Reserved Bits Trashed!");
//MM_DumpTables(0, -1);
+ // Register Dump
+ Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
+ Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
+ //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
+ Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
+ Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
+ {
+ Uint dr0, dr1;
+ __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
+ __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
+ Log("DR0 %08x DR1 %08x", dr0, dr1);
+ }
+
Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
}
//LOG("paddr = 0x%llx (new table)", paddr);
if( paddr == 0 ) {
Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
- LEAVE('i',0);
+ //LEAVE('i',0);
return 0;
}
// Map
return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
}
-
-/**
- * \fn int MM_IsUser(tVAddr VAddr)
- * \brief Checks if a page is user accessable
- */
-int MM_IsUser(tVAddr VAddr)
-{
- if( !(gaPageDir[VAddr >> 22] & 1) )
- return 0;
- if( !(gaPageTable[VAddr >> 12] & 1) )
- return 0;
- if( !(gaPageTable[VAddr >> 12] & PF_USER) )
- return 0;
- return 1;
-}
-
/**
- * \fn void MM_SetCR3(tPAddr CR3)
+ * \fn void MM_SetCR3(Uint CR3)
* \brief Sets the current process space
*/
-void MM_SetCR3(tPAddr CR3)
+void MM_SetCR3(Uint CR3)
{
__asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
}
* \fn tVAddr MM_ClearUser()
* \brief Clear user's address space
*/
-tVAddr MM_ClearUser()
+tVAddr MM_ClearUser(void)
{
Uint i, j;
}
INVLPG( gaPageDir );
- return *gaPageCR3;
+ return *gpPageCR3;
}
/**
- * \fn tPAddr MM_Clone()
+ * \fn tPAddr MM_Clone(void)
* \brief Clone the current address space
*/
-tPAddr MM_Clone()
+tPAddr MM_Clone(void)
{
Uint i, j;
tVAddr ret;
tVAddr kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
void *tmp;
- LOCK( &gilTempFractal );
+ Mutex_Acquire( &glTempFractal );
// Create Directory Table
- *gTmpCR3 = MM_AllocPhys() | 3;
+ *gpTmpCR3 = MM_AllocPhys() | 3;
INVLPG( gaTmpDir );
- //LOG("Allocated Directory (%x)", *gTmpCR3);
+ //LOG("Allocated Directory (%x)", *gpTmpCR3);
memsetd( gaTmpDir, 0, 1024 );
// Copy Tables
- for(i=0;i<768;i++)
+ for( i = 0; i < 768; i ++)
{
// Check if table is allocated
if( !(gaPageDir[i] & PF_PRESENT) ) {
{
// Fractal
if( i == (PAGE_TABLE_ADDR >> 22) ) {
- gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gTmpCR3;
+ gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
continue;
}
}
}
- ret = *gTmpCR3 & ~0xFFF;
- RELEASE( &gilTempFractal );
+ ret = *gpTmpCR3 & ~0xFFF;
+ Mutex_Release( &glTempFractal );
//LEAVE('x', ret);
return ret;
}
/**
- * \fn tVAddr MM_NewKStack()
+ * \fn tVAddr MM_NewKStack(void)
* \brief Create a new kernel stack
*/
-tVAddr MM_NewKStack()
+tVAddr MM_NewKStack(void)
{
- tVAddr base = KERNEL_STACKS;
+ tVAddr base;
Uint i;
- for(;base<KERNEL_STACKS_END;base+=KERNEL_STACK_SIZE)
+ for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
{
if(MM_GetPhysAddr(base) != 0) continue;
- for(i=0;i<KERNEL_STACK_SIZE;i+=0x1000) {
+ for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000) {
MM_Allocate(base+i);
}
+ Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
return base+KERNEL_STACK_SIZE;
}
Warning("MM_NewKStack - No address space left\n");
//Log(" MM_NewWorkerStack: base = 0x%x", base);
// Acquire the lock for the temp fractal mappings
- LOCK(&gilTempFractal);
+ Mutex_Acquire(&glTempFractal);
// Set the temp fractals to TID0's address space
- *gTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
- //Log(" MM_NewWorkerStack: *gTmpCR3 = 0x%x", *gTmpCR3);
+ *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
+ //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
INVLPG( gaTmpDir );
pages[ addr >> 12 ] = MM_AllocPhys();
gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
}
- *gTmpCR3 = 0;
+ *gpTmpCR3 = 0;
// Release the temp mapping lock
- RELEASE(&gilTempFractal);
+ Mutex_Release(&glTempFractal);
// Copy the old stack
oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
// Read-Only
if( Mask & MM_PFLAG_RO )
{
- if( Flags & MM_PFLAG_RO ) *ent &= ~PF_WRITE;
- else *ent |= PF_WRITE;
+ if( Flags & MM_PFLAG_RO ) {
+ *ent &= ~PF_WRITE;
+ }
+ else {
+ gaPageDir[VAddr >> 22] |= PF_WRITE;
+ *ent |= PF_WRITE;
+ }
}
// Kernel
if( Mask & MM_PFLAG_KERNEL )
{
- if( Flags & MM_PFLAG_KERNEL ) *ent &= ~PF_USER;
- else *ent |= PF_USER;
+ if( Flags & MM_PFLAG_KERNEL ) {
+ *ent &= ~PF_USER;
+ }
+ else {
+ gaPageDir[VAddr >> 22] |= PF_USER;
+ *ent |= PF_USER;
+ }
}
// Copy-On-Write
*ent |= PF_WRITE;
}
}
+
+ //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
+ // *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
+}
+
+/**
+ * \brief Get the flags on a page
+ */
+Uint MM_GetFlags(tVAddr VAddr)
+{
+ tTabEnt *ent;
+ Uint ret = 0;
+
+ // Validity Check
+ if( !(gaPageDir[VAddr >> 22] & 1) ) return 0;
+ if( !(gaPageTable[VAddr >> 12] & 1) ) return 0;
+
+ ent = &gaPageTable[VAddr >> 12];
+
+ // Read-Only
+ if( !(*ent & PF_WRITE) ) ret |= MM_PFLAG_RO;
+ // Kernel
+ if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
+ // Copy-On-Write
+ if( *ent & PF_COW ) ret |= MM_PFLAG_COW;
+
+ return ret;
}
/**
Uint temp;
int wasRO = 0;
+ //ENTER("xVAddr", VAddr);
+
// Check if mapped
if( !(gaPageDir [VAddr >> 22] & PF_PRESENT) ) return 0;
if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) ) return 0;
if(!wasRO) gaPageTable[VAddr >> 12] |= PF_WRITE;
INVLPG(VAddr);
+ //LEAVE('X', ret);
return ret;
}
PAddr &= ~0xFFF;
- //LOG("gilTempMappings = %i", gilTempMappings);
+ //LOG("glTempMappings = %i", glTempMappings);
for(;;)
{
- LOCK( &gilTempMappings );
+ Mutex_Acquire( &glTempMappings );
for( i = 0; i < NUM_TEMP_PAGES; i ++ )
{
gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
INVLPG( TEMP_MAP_ADDR + (i << 12) );
//LEAVE('p', TEMP_MAP_ADDR + (i << 12));
- RELEASE( &gilTempMappings );
+ Mutex_Release( &glTempMappings );
return TEMP_MAP_ADDR + (i << 12);
}
- RELEASE( &gilTempMappings );
- Threads_Yield();
+ Mutex_Release( &glTempMappings );
+ Threads_Yield(); // TODO: Less expensive
}
}
}
/**
- * \fn tVAddr MM_MapHWPage(tPAddr PAddr, Uint Number)
+ * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
* \brief Allocates a contigous number of pages
*/
-tVAddr MM_MapHWPage(tPAddr PAddr, Uint Number)
+tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
{
int i, j;
{
phys = MM_AllocPhys();
*PhysAddr = phys;
- ret = MM_MapHWPage(phys, 1);
+ ret = MM_MapHWPages(phys, 1);
if(ret == 0) {
MM_DerefPhys(phys);
LEAVE('i', 0);
}
// Slow Allocate
- phys = MM_AllocPhysRange(Pages);
+ phys = MM_AllocPhysRange(Pages, MaxBits);
// - Was it allocated?
if(phys == 0) {
LEAVE('i', 0);
return 0;
}
- // - Check if the memory is OK
- if(phys + (Pages-1)*0x1000 > maxCheck)
- {
- // Deallocate and return 0
- for(;Pages--;phys+=0x1000)
- MM_DerefPhys(phys);
- LEAVE('i', 0);
- return 0;
- }
// Allocated successfully, now map
- ret = MM_MapHWPage(phys, Pages);
+ ret = MM_MapHWPages(phys, Pages);
if( ret == 0 ) {
// If it didn't map, free then return 0
for(;Pages--;phys+=0x1000)
}
/**
- * \fn void MM_UnmapHWPage(tVAddr VAddr, Uint Number)
+ * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
* \brief Unmap a hardware page
*/
-void MM_UnmapHWPage(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
{
int i, j;
+
+ //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
+
// Sanity Check
- if(VAddr < HW_MAP_ADDR || VAddr-Number*0x1000 > HW_MAP_MAX) return;
+ if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX) return;
i = VAddr >> 12;
- LOCK( &gilTempMappings ); // Temp and HW share a directory, so they share a lock
+ Mutex_Acquire( &glTempMappings ); // Temp and HW share a directory, so they share a lock
for( j = 0; j < Number; j++ )
{
- MM_DerefPhys( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] );
- gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = 0;
+ MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
+ gaPageTable[ i + j ] = 0;
}
- RELEASE( &gilTempMappings );
+ Mutex_Release( &glTempMappings );
}
// --- EXPORTS ---
EXPORT(MM_GetPhysAddr);
EXPORT(MM_Map);
//EXPORT(MM_Unmap);
-EXPORT(MM_MapHWPage);
+EXPORT(MM_MapHWPages);
EXPORT(MM_AllocDMA);
-EXPORT(MM_UnmapHWPage);
+EXPORT(MM_UnmapHWPages);