include $(ACESSDIR)/BuildConf/x86/default.mk
-MODULES := $(filter-out Display/VESA,$(MODULES))
+MODULES := $(filter-out Interfaces/UDI,$(MODULES))
*/
#include <acess.h>
#include <drv_serial.h>
+#include <debug_hooks.h>
// === CONSTANTS ===
//#define UART0_BASE 0x10009000
int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi);
tVAddr MM_NewUserStack(void);
-tPAddr MM_AllocateZero(tVAddr VAddr);
+//tPAddr MM_AllocateZero(volatile void *VAddr);
tPAddr MM_AllocateRootTable(void);
void MM_int_CloneTable(Uint32 *DestEnt, int Table);
tPAddr MM_Clone(int ClearUser);
return pi.PhysAddr | ((tVAddr)Ptr & ((1 << pi.Size)-1));
}
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(const volatile void *VAddr)
{
tMM_PageInfo pi;
int ret;
- if( MM_int_GetPageInfo(VAddr, &pi) )
+ if( MM_int_GetPageInfo((tVAddr)VAddr, &pi) )
return 0;
ret = 0;
return ret;
}
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
{
tMM_PageInfo pi;
Uint curFlags;
- if( MM_int_GetPageInfo(VAddr, &pi) )
+ if( MM_int_GetPageInfo((tVAddr)VAddr, &pi) )
return ;
curFlags = MM_GetFlags(VAddr);
pi.bExecutable = !!(curFlags & MM_PFLAG_EXEC);
- MM_int_SetPageInfo(VAddr, &pi);
+ MM_int_SetPageInfo((tVAddr)VAddr, &pi);
}
int MM_IsValidBuffer(tVAddr Addr, size_t Size)
Size += Addr & (PAGE_SIZE-1);
Addr &= ~(PAGE_SIZE-1);
- if( MM_int_GetPageInfo(Addr, &pi) ) return 0;
+ if( MM_int_GetPageInfo((tVAddr)Addr, &pi) ) return 0;
Addr += PAGE_SIZE;
if(pi.AP != AP_KRW_ONLY && pi.AP != AP_KRO_ONLY)
return 1;
}
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
{
tMM_PageInfo pi = {0};
#if TRACE_MAPS
Log("MM_Map %P=>%p", PAddr, VAddr);
#endif
+ // TODO: Double check that an address isn't being clobbered
+
pi.PhysAddr = PAddr;
pi.Size = 12;
- if(VAddr < USER_STACK_TOP)
- pi.AP = AP_RW_BOTH;
- else
- pi.AP = AP_KRW_ONLY; // Kernel Read/Write
+ pi.AP = ( (tVAddr)VAddr < USER_STACK_TOP ? AP_RW_BOTH : AP_KRW_ONLY );
pi.bExecutable = 1;
- if( MM_int_SetPageInfo(VAddr, &pi) ) {
+ if( MM_int_SetPageInfo( (tVAddr)VAddr, &pi) ) {
// MM_DerefPhys(pi.PhysAddr);
return 0;
}
return pi.PhysAddr;
}
-tPAddr MM_Allocate(tVAddr VAddr)
+tPAddr MM_Allocate(volatile void *VAddr)
{
tMM_PageInfo pi = {0};
pi.PhysAddr = MM_AllocPhys();
if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0);
pi.Size = 12;
- if(VAddr < USER_STACK_TOP)
- pi.AP = AP_RW_BOTH;
- else
- pi.AP = AP_KRW_ONLY;
+ pi.AP = ( (tVAddr)VAddr < USER_STACK_TOP ? AP_RW_BOTH : AP_KRW_ONLY );
pi.bExecutable = 0;
- if( MM_int_SetPageInfo(VAddr, &pi) ) {
+ if( MM_int_SetPageInfo( (tVAddr)VAddr, &pi ) )
+ {
MM_DerefPhys(pi.PhysAddr);
LEAVE('i', 0);
return 0;
return pi.PhysAddr;
}
-tPAddr MM_AllocateZero(tVAddr VAddr)
+void MM_AllocateZero(volatile void *VAddr)
{
if( !giMM_ZeroPage ) {
giMM_ZeroPage = MM_Allocate(VAddr);
MM_Map(VAddr, giMM_ZeroPage);
}
MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
- return giMM_ZeroPage;
}
-void MM_Deallocate(tVAddr VAddr)
+void MM_Deallocate(volatile void *VAddr)
{
tMM_PageInfo pi;
- if( MM_int_GetPageInfo(VAddr, &pi) ) return ;
+ if( MM_int_GetPageInfo((tVAddr)VAddr, &pi) ) return ;
if( pi.PhysAddr == 0 ) return;
MM_DerefPhys(pi.PhysAddr);
pi.PhysAddr = 0;
pi.AP = 0;
pi.bExecutable = 0;
- MM_int_SetPageInfo(VAddr, &pi);
+ MM_int_SetPageInfo((tVAddr)VAddr, &pi);
}
tPAddr MM_AllocateRootTable(void)
void *MM_MapTemp(tPAddr PAddr)
{
- tVAddr ret;
- tMM_PageInfo pi;
-
- for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
+ for( tVAddr ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE )
{
+ tMM_PageInfo pi;
+
if( MM_int_GetPageInfo(ret, &pi) == 0 )
continue;
// Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0));
MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp
- MM_Map(ret, PAddr);
+ MM_Map( (void*)ret, PAddr );
return (void*)ret;
}
return ;
}
- MM_Deallocate(VAddr);
+ MM_Deallocate(Ptr);
}
void *MM_MapHWPages(tPAddr PAddr, Uint NPages)
// Map the pages
for( i = 0; i < NPages; i ++ )
- MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE);
+ MM_Map( (tPage*)ret + i, PAddr+i*PAGE_SIZE);
// and return
LEAVE('p', ret);
return (void*)ret;
return ret;
}
-void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)
+void MM_UnmapHWPages(volatile void *VAddr, Uint Number)
{
Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages");
}
}
// 1 guard page
+ tPage *pageptr = (void*)(addr + PAGE_SIZE);
for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE )
{
- if( MM_Allocate(addr + ofs) == 0 )
+ if( MM_Allocate( pageptr ) == 0 )
{
while(ofs)
{
ofs -= PAGE_SIZE;
- MM_Deallocate(addr + ofs);
+ MM_Deallocate( pageptr-- );
}
Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate");
return 0;
}
// 1 guard page
- for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE )
+ tPage *pageptr = (void*)addr;
+ for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE, pageptr ++ )
{
- tPAddr rv;
- if(ofs >= USER_STACK_SIZE - USER_STACK_COMM)
- rv = MM_Allocate(addr + ofs);
- else
- rv = MM_AllocateZero(addr + ofs);
- if(rv == 0)
- {
- while(ofs)
+ if(ofs >= USER_STACK_SIZE - USER_STACK_COMM) {
+ tPAddr rv = MM_Allocate(pageptr);
+ if(rv == 0)
{
- ofs -= PAGE_SIZE;
- MM_Deallocate(addr + ofs);
+ while(ofs)
+ {
+ ofs -= PAGE_SIZE;
+ MM_Deallocate(pageptr --);
+ }
+ Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate");
+ return 0;
}
- Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate");
- return 0;
}
- MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL);
+ else {
+ MM_AllocateZero(pageptr);
+ }
+ MM_SetFlags(pageptr, 0, MM_PFLAG_KERNEL);
}
Log("Return %p", addr + ofs);
// MM_DumpTables(0, 0x80000000);
#include <pmemmap.h>
#include <hal_proc.h>
#include <semaphore.h>
+#include <debug_hooks.h>
//#define USE_STACK 1
#define TRACE_ALLOCS 0 // Print trace messages on AllocPhys/DerefPhys
static const int addrClasses[] = {0,16,20,24,32,64};
static const int numAddrClasses = sizeof(addrClasses)/sizeof(addrClasses[0]);
-// === IMPORTS ===
-extern void Proc_PrintBacktrace(void);
-
// === PROTOTYPES ===
void MM_Install(int NPMemRanges, tPMemMapEnt *PMemRanges);
//tPAddr MM_AllocPhys(void);
#include <proc.h>
#include <mm_virt.h>
#include <threads_int.h> // Needed for SSE handling
+#include <debug_hooks.h>
#define MAX_BACKTRACE 6
extern int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
extern void Error_Backtrace(Uint IP, Uint BP);
extern void Proc_EnableSSE(void);
-extern void Threads_Dump(void);
extern void Proc_RestoreSSE(Uint32 Data);
// === PROTOTYPES ===
extern void Debug_PutCharDebug(char ch);
extern void Debug_PutStringDebug(const char *Str);
-// TODO: Move this to acess.h
-extern tPAddr MM_AllocateZero(tVAddr VAddr);
-
#endif
// === PROTOTYPEs ===
int putDebugChar(char ch);
+void Debug_SerialIRQHandler(int irq, void *unused);
// === CODE ===
/**
// Unmap and free
numPages = (gaArch_BootModules[i].Size + ((Uint)gaArch_BootModules[i].Base&0xFFF) + 0xFFF) >> 12;
- MM_UnmapHWPages( (tVAddr)gaArch_BootModules[i].Base, numPages );
+ MM_UnmapHWPages( gaArch_BootModules[i].Base, numPages );
for( j = 0; j < numPages; j++ )
MM_DerefPhys( gaArch_BootModules[i].PBase + (j << 12) );
+		// TODO: Clarify why ArgString can legitimately be below KERNEL_BASE here
if( (tVAddr) gaArch_BootModules[i].ArgString < KERNEL_BASE )
- MM_UnmapHWPages( (tVAddr)gaArch_BootModules[i].ArgString, 2 );
+ MM_UnmapHWPages( gaArch_BootModules[i].ArgString, 2 );
}
Log_Log("Arch", "Boot modules loaded");
if( gaArch_BootModules )
#include <archinit.h>
#include <pmemmap.h>
#include <mm_virt.h>
+#include <debug_hooks.h>
#define TRACE_REF 0
int i;
Uint64 base, size;
tVAddr vaddr;
- tPAddr paddr, firstFreePage;
+ tPAddr paddr;
ENTER("iNPMemRanges pPMemRanges",
NPMemRanges, PMemRanges);
if( i == NUM_STATIC_ALLOC )
{
// Map
- MM_Map(vaddr, paddr);
+ MM_Map((void*)vaddr, paddr);
todo --;
// Update virtual pointer
PMemMap_DumpBlocks(PMemRanges, NPMemRanges);
// Save the current value of paddr to simplify the allocation later
- firstFreePage = paddr;
+ giFirstFreePage = paddr;
LOG("Clearing multi bitmap");
// Fill the bitmaps (set most to "allocated")
void MM_DumpStatistics(void)
{
// TODO: Statistics for x86_64 PMM
+ Log_Warning("PMem", "TODO: Dump statistics");
}
/**
// TODO: Page out
// ATM. Just Warning
Warning(" MM_AllocPhysRange: Out of free pages");
- Log_Warning("Arch",
+ Log_Warning("PMem",
"Out of memory (unable to fulfil request for %i pages), zero remaining",
Pages
);
// TODO: Page out
// ATM. Just Warning
Warning(" MM_AllocPhysRange: Out of memory (unable to fulfil request for %i pages)", Pages);
- Log_Warning("Arch",
+ Log_Warning("PMem",
"Out of memory (unable to fulfil request for %i pages)",
Pages
);
const int pages_per_refpage = PAGE_SIZE/sizeof(gaiPageReferences[0]);
int i;
int page_base = page / pages_per_refpage * pages_per_refpage;
- if( !MM_Allocate( ref_base ) ) {
- Log_Error("Arch", "Out of memory when allocating reference count page");
+ if( !MM_Allocate( (void*)ref_base ) ) {
+ Log_Error("PMem", "Out of memory when allocating reference count page");
return ;
}
// Fill block
int MM_SetPageNode(tPAddr PAddr, void *Node)
{
tPAddr page = PAddr >> 12;
- tVAddr node_page = ((tVAddr)&gapPageNodes[page]) & ~(PAGE_SIZE-1);
+ void *node_page = (void*)( ((tVAddr)&gapPageNodes[page]) & ~(PAGE_SIZE-1) );
// if( !MM_GetRefCount(PAddr) ) return 1;
- if( !MM_GetPhysAddr((void*)node_page) ) {
+ if( !MM_GetPhysAddr(node_page) ) {
if( !MM_Allocate(node_page) )
return -1;
- memset( (void*)node_page, 0, PAGE_SIZE );
+ memset( node_page, 0, PAGE_SIZE );
}
gapPageNodes[page] = Node;
void MM_int_DumpTablesEnt(tVAddr RangeStart, size_t Length, tPAddr Expected);
//void MM_DumpTables(tVAddr Start, tVAddr End);
int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
- int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
+ int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
// int MM_Map(tVAddr VAddr, tPAddr PAddr);
void MM_Unmap(tVAddr VAddr);
void MM_int_ClearTableLevel(tVAddr VAddr, int LevelBits, int MaxEnts);
// === GLOBALS ===
tMutex glMM_TempFractalLock;
+tShortSpinlock glMM_ZeroPage;
tPAddr gMM_ZeroPage;
// === CODE ===
 * \param bTemp Use temporary mappings
* \param bLarge Treat as a large page
*/
-int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
+int MM_MapEx(volatile void *VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge)
{
tPAddr *ent;
int rv;
ENTER("pVAddr PPAddr", VAddr, PAddr);
// Get page pointer (Allow allocating)
- rv = MM_GetPageEntryPtr(VAddr, bTemp, 1, bLarge, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, bTemp, 1, bLarge, &ent);
if(rv < 0) LEAVE_RET('i', 0);
if( *ent & 1 ) LEAVE_RET('i', 0);
*ent = PAddr | 3;
- if( VAddr < 0x800000000000 )
+ if( (tVAddr)VAddr < USER_MAX )
*ent |= PF_USER;
-
INVLPG( VAddr );
LEAVE('i', 1);
* \param VAddr Target virtual address
* \param PAddr Physical address of page
*/
-int MM_Map(tVAddr VAddr, tPAddr PAddr)
+int MM_Map(volatile void *VAddr, tPAddr PAddr)
{
return MM_MapEx(VAddr, PAddr, 0, 0);
}
// Check Page Dir
if( !(PAGEDIR(VAddr >> 21) & 1) ) return ;
- PAGETABLE(VAddr >> PTAB_SHIFT) = 0;
+ tPAddr *ent = &PAGETABLE(VAddr >> PTAB_SHIFT);
+ *ent = 0;
INVLPG( VAddr );
}
/**
* \brief Allocate a block of memory at the specified virtual address
*/
-tPAddr MM_Allocate(tVAddr VAddr)
+tPAddr MM_Allocate(volatile void *VAddr)
{
tPAddr ret;
- ENTER("xVAddr", VAddr);
+ ENTER("pVAddr", VAddr);
// Ensure the tables are allocated before the page (keeps things neat)
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+ MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 1, 0, NULL );
// Allocate the page
ret = MM_AllocPhys();
return ret;
}
-tPAddr MM_AllocateZero(tVAddr VAddr)
+void MM_AllocateZero(volatile void *VAddr)
{
- tPAddr ret = gMM_ZeroPage;
-
- MM_GetPageEntryPtr(VAddr, 0, 1, 0, NULL);
+ // Ensure dir is populated
+ MM_GetPageEntryPtr((tVAddr)VAddr, 0, 1, 0, NULL);
- if(!gMM_ZeroPage) {
- ret = gMM_ZeroPage = MM_AllocPhys();
- MM_RefPhys(ret); // Don't free this please
- MM_Map(VAddr, ret);
- memset((void*)VAddr, 0, 0x1000);
+ if(!gMM_ZeroPage)
+ {
+ SHORTLOCK(&glMM_ZeroPage);
+ if( !gMM_ZeroPage )
+ {
+ gMM_ZeroPage = MM_AllocPhys();
+ MM_Map(VAddr, gMM_ZeroPage);
+ memset((void*)VAddr, 0, PAGE_SIZE);
+ }
+ SHORTREL(&glMM_ZeroPage);
}
- else {
- MM_Map(VAddr, ret);
+ else
+ {
+ MM_Map(VAddr, gMM_ZeroPage);
}
- MM_RefPhys(ret); // Refernce for this map
+	MM_RefPhys(gMM_ZeroPage);	// Reference for this map
MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW);
- return ret;
}
/**
* \brief Deallocate a page at a virtual address
*/
-void MM_Deallocate(tVAddr VAddr)
+void MM_Deallocate(volatile void *VAddr)
{
- tPAddr phys;
-
- phys = MM_GetPhysAddr( (void*)VAddr );
+ tPAddr phys = MM_GetPhysAddr( VAddr );
if(!phys) return ;
- MM_Unmap(VAddr);
+ MM_Unmap((tVAddr)VAddr);
MM_DerefPhys(phys);
}
/**
* \brief Sets the flags on a page
*/
-void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
+void MM_SetFlags(volatile void *VAddr, Uint Flags, Uint Mask)
{
tPAddr *ent;
int rv;
// Get pointer
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr( (tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return ;
// Ensure the entry is valid
if( Flags & MM_PFLAG_COW ) {
*ent &= ~PF_WRITE;
*ent |= PF_COW;
- INVLPG_ALL();
}
else {
*ent &= ~PF_COW;
/**
* \brief Get the flags applied to a page
*/
-Uint MM_GetFlags(tVAddr VAddr)
+Uint MM_GetFlags(volatile const void *VAddr)
{
tPAddr *ent;
int rv, ret = 0;
- rv = MM_GetPageEntryPtr(VAddr, 0, 0, 0, &ent);
+ rv = MM_GetPageEntryPtr((tVAddr)VAddr, 0, 0, 0, &ent);
if(rv < 0) return 0;
if( !(*ent & 1) ) return 0;
*/
void *MM_MapHWPages(tPAddr PAddr, Uint Number)
{
- tVAddr ret;
- int num;
-
//TODO: Add speedups (memory of first possible free)
- for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_TOP; ret += 0x1000 )
+ for( tPage *ret = (void*)MM_HWMAP_BASE; ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- for( num = Number; num -- && ret < MM_HWMAP_TOP; ret += 0x1000 )
+ // Check if this region has already been used
+ int num;
+ for( num = Number; num -- && ret < (tPage*)MM_HWMAP_TOP; ret ++ )
{
- if( MM_GetPhysAddr( (void*)ret ) != 0 )
+ if( MM_GetPhysAddr( ret ) != 0 )
break;
}
if( num >= 0 ) continue;
// Log_Debug("MMVirt", "Mapping %i pages to %p (base %P)", Number, ret-Number*0x1000, PAddr);
+ // Map backwards (because `ret` is at the top of the region atm)
PAddr += 0x1000 * Number;
-
while( Number -- )
{
- ret -= 0x1000;
+ ret --;
PAddr -= 0x1000;
MM_Map(ret, PAddr);
MM_RefPhys(PAddr);
}
- return (void*)ret;
+ return ret;
}
Log_Error("MM", "MM_MapHWPages - No space for %i pages", Number);
/**
* \brief Free a range of hardware pages
*/
-void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
+void MM_UnmapHWPages(volatile void *VAddr, Uint Number)
{
// Log_KernelPanic("MM", "TODO: Implement MM_UnmapHWPages");
+ tPage *page = (void*)VAddr;
while( Number -- )
{
- MM_DerefPhys( MM_GetPhysAddr((void*)VAddr) );
- MM_Unmap(VAddr);
- VAddr += 0x1000;
+ MM_DerefPhys( MM_GetPhysAddr(page) );
+ MM_Unmap((tVAddr)page);
+ page ++;
}
}
void MM_FreeTemp(void *Ptr)
{
- MM_Deallocate((tVAddr)Ptr);
- return ;
+ MM_Deallocate(Ptr);
}
{
tPAddr ret;
int i;
- tVAddr kstackbase;
// #1 Create a copy of the PML4
ret = MM_AllocPhys();
// #6 Create kernel stack
// tThread->KernelStack is the top
// There is 1 guard page below the stack
- kstackbase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
+ tPage *kstackbase = (void*)( Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE );
// Clone stack
TMPMAPLVL4(MM_KSTACK_BASE >> PML4_SHIFT) = 0;
- for( i = 1; i < KERNEL_STACK_SIZE/0x1000; i ++ )
+ for( i = 1; i < KERNEL_STACK_SIZE/PAGE_SIZE; i ++ )
{
tPAddr phys = MM_AllocPhys();
void *tmpmapping;
- MM_MapEx(kstackbase+i*0x1000, phys, 1, 0);
+ MM_MapEx(kstackbase + i, phys, 1, 0);
tmpmapping = MM_MapTemp(phys);
- if( MM_GetPhysAddr( (void*)(kstackbase+i*0x1000) ) )
- memcpy(tmpmapping, (void*)(kstackbase+i*0x1000), 0x1000);
+ // If the current thread's stack is shorter than the new one, zero
+ if( MM_GetPhysAddr( kstackbase + i ) )
+ memcpy(tmpmapping, kstackbase + i, 0x1000);
else
memset(tmpmapping, 0, 0x1000);
// if( i == 0xF )
Log_Error("MM", "MM_NewWorkerStack - Unable to allocate page");
return 0;
}
- MM_MapEx(ret + i*0x1000, phys, 1, 0);
- MM_SetFlags(ret + i*0x1000, MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
+ MM_MapEx( (void*)(ret + i*0x1000), phys, 1, 0);
+ // XXX: ... this doesn't change the correct address space
+ MM_SetFlags( (void*)(ret + i*0x1000), MM_PFLAG_KERNEL|MM_PFLAG_RO, MM_PFLAG_KERNEL);
}
// Copy data
//Log("MM_NewKStack: Found one at %p", base + KERNEL_STACK_SIZE);
for( i = 0x1000; i < KERNEL_STACK_SIZE; i += 0x1000)
{
- if( !MM_Allocate(base+i) )
+ if( !MM_Allocate( (void*)(base+i) ) )
{
Log_Warning("MM", "MM_NewKStack - Allocation failed");
for( i -= 0x1000; i; i -= 0x1000)
- MM_Deallocate(base+i);
+ MM_Deallocate((void*)(base+i));
return 0;
}
}
outb(0x40, (PIT_TIMER_DIVISOR>>8)&0xFF); // High Byte
// Create Per-Process Data Block
- if( !MM_Allocate(MM_PPD_CFG) )
+ if( !MM_Allocate( (void*)MM_PPD_CFG ) )
{
Warning("Oh, hell, Unable to allocate PPD for Thread#0");
}
*/
Uint Proc_MakeUserStack(void)
{
- int i;
- Uint base = USER_STACK_TOP - USER_STACK_SZ;
+ tPage *base = (void*)(USER_STACK_TOP - USER_STACK_SZ);
// Check Prospective Space
- for( i = USER_STACK_SZ >> 12; i--; )
+ for( int i = USER_STACK_SZ/PAGE_SIZE; i--; )
{
- if( MM_GetPhysAddr( (void*)(base + (i<<12)) ) != 0 )
- break;
+ if( MM_GetPhysAddr( base + i ) != 0 )
+ {
+ return 0;
+ }
}
- if(i != -1) return 0;
-
// Allocate Stack - Allocate incrementally to clean up MM_Dump output
// - Most of the user stack is the zero page
- for( i = 0; i < (USER_STACK_SZ-USER_STACK_PREALLOC)/0x1000; i++ )
+ int i = 0;
+ for( ; i < (USER_STACK_SZ-USER_STACK_PREALLOC)/PAGE_SIZE; i++ )
{
- MM_AllocateZero( base + (i<<12) );
+ MM_AllocateZero( base + i );
}
// - but the top USER_STACK_PREALLOC pages are actually allocated
- for( ; i < USER_STACK_SZ/0x1000; i++ )
+ for( ; i < USER_STACK_SZ/PAGE_SIZE; i++ )
{
- tPAddr alloc = MM_Allocate( base + (i<<12) );
+ tPAddr alloc = MM_Allocate( base + i );
if( !alloc )
{
// Error
- Log_Error("Proc", "Unable to allocate user stack (%i pages requested)", USER_STACK_SZ/0x1000);
+ Log_Error("Proc", "Unable to allocate user stack (%i pages requested)", USER_STACK_SZ/PAGE_SIZE);
while( i -- )
- MM_Deallocate( base + (i<<12) );
+ MM_Deallocate( base + i );
return 0;
}
}
- return base + USER_STACK_SZ;
+ return (tVAddr)( base + USER_STACK_SZ/PAGE_SIZE );
}
void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, const char **ArgV, int DataSize)
extern void Threads_Dump(void);
extern void Threads_ToggleTrace(int TID);
extern void Heap_Stats(void);
+extern void MM_DumpStatistics(void);
extern void Proc_PrintBacktrace(void);
*/
extern void MM_DumpTables(tVAddr Start, tVAddr End);
-/**
- * \brief Dump physical memory usage statistics to the debug channel
- */
-extern void MM_DumpStatistics(void);
-
/**
* \brief Check if a buffer is valid (and all user if originally user)
* \param Addr Base address
*
* Uses 4.125+PtrSize bytes per page
*/
+#include <debug_hooks.h>
#define MM_PAGE_REFCOUNTS MM_PMM_BASE
#define MM_PAGE_NODES (MM_PMM_BASE+(MM_MAXPHYSPAGE*sizeof(Uint32)))
int MM_int_GetRangeID( tPAddr Addr );
int MM_int_GetMapEntry( void *Data, int Index, tPAddr *Start, tPAddr *Length );
void MM_Tpl_InitPhys(int MaxRAMPage, void *MemoryMap);
-void MM_DumpStatistics(void);
// === GLOBALS ===
tMutex glPhysicalPages;
// Only need to allocate bitmaps
if( !MM_GetPhysAddr( (void*)bitmap_page ) ) {
- if( !MM_Allocate( bitmap_page ) ) {
+ if( !MM_Allocate( (void*)bitmap_page ) ) {
Log_KernelPanic("PMM", "Out of memory during init, this is bad");
return ;
}
if( !MM_GetPhysAddr( (void*)refpage ) )
{
int pages_per_page, basepage, i;
- if( MM_Allocate(refpage) == 0 ) {
+ if( MM_Allocate( (void*) refpage) == 0 ) {
// Out of memory, can this be resolved?
// TODO: Reclaim memory
Log_Error("PMM", "Out of memory (MM_RefPhys)");
if( !MM_GetRefCount(PAddr) ) return 1;
if( !MM_GetPhysAddr( (void*)node_page ) ) {
- if( !MM_Allocate(node_page) )
+ if( !MM_Allocate( (void*)node_page) )
return -1;
memset( (void*)node_page, 0, PAGE_SIZE );
}
gpPL110_IOMem->LCDTiming3 = 0;\r
\r
if( gpPL110_Framebuffer ) {\r
- MM_UnmapHWPages((tVAddr)gpPL110_Framebuffer, (giPL110_FramebufferSize+0xFFF)>>12);\r
+ MM_UnmapHWPages(gpPL110_Framebuffer, (giPL110_FramebufferSize+0xFFF)>>12);\r
}\r
giPL110_FramebufferSize = W*H*4;\r
\r
- gpPL110_Framebuffer = (void*)MM_AllocDMA( (giPL110_FramebufferSize+0xFFF)>>12, 32, &gPL110_FramebufferPhys );\r
+ gpPL110_Framebuffer = MM_AllocDMA( (giPL110_FramebufferSize+0xFFF)>>12, 32, &gPL110_FramebufferPhys );\r
gpPL110_IOMem->LCDUPBase = gPL110_FramebufferPhys;\r
gpPL110_IOMem->LCDLPBase = 0;\r
\r
#include <Input/Keyboard/include/keyboard.h>
#include "keymap_int.h"
#include "layout_kbdus.h"
-#include <hal_proc.h>
#include <debug_hooks.h>
#define USE_KERNEL_MAGIC 1
int elf_doRelocate_arm(uint32_t r_info, uint32_t *ptr, Elf32_Addr addend, int type, int bRela, const char *Sym, intptr_t iBaseDiff);
int elf_doRelocate_unk(uint32_t , uint32_t *, Elf32_Addr , int , int , const char *, intptr_t);
#ifdef SUPPORT_ELF64
+int _Elf64DoReloc_X86_64(void *Base, const char *strtab, Elf64_Sym *symtab, Elf64_Xword r_info, void *ptr, Elf64_Sxword addend);
void *Elf64Relocate(void *Base, char **envp, const char *Filename);
int Elf64GetSymbol(void *Base, const char *Name, void **Ret, size_t *Size);
#endif