#include <mm_virt.h>
#include <pmemmap.h>
#include <hal_proc.h>
#include <semaphore.h>
//#define USE_STACK 1
#define TRACE_ALLOCS 0 // Print trace messages on AllocPhys/DerefPhys
// === CODE ===
void MM_Install(int NPMemRanges, tPMemMapEnt *PMemRanges)
{
- Uint i;
Uint64 maxAddr = 0;
// --- Find largest address
- for( i = 0; i < NPMemRanges; i ++ )
+ for( Uint i = 0; i < NPMemRanges; i ++ )
{
tPMemMapEnt *ent = &PMemRanges[i];
// If entry is RAM and is above `maxAddr`, change `maxAddr`
giTotalMemorySize += ent->Length >> 12;
}
}
+ LOG("giTotalMemorySize = %lli KiB", giTotalMemorySize*4);
+ LOG("maxAddr = 0x%X", maxAddr);
+
+ // Clip to 32-bits
+ if( maxAddr > (1ULL << 32) ) {
+ maxAddr = (1ULL << 32);
+ }
giPageCount = maxAddr >> 12;
giLastPossibleFree = giPageCount - 1;
-
memsetd(gaPageBitmap, 0xFFFFFFFF, giPageCount/32);
// Set up allocateable space
- for( i = 0; i < NPMemRanges; i ++ )
+ for( Uint i = 0; i < NPMemRanges; i ++ )
{
tPMemMapEnt *ent = &PMemRanges[i];
if( ent->Type == PMEMTYPE_FREE )
{
Uint64 startpg = ent->Start / PAGE_SIZE;
Uint64 pgcount = ent->Length / PAGE_SIZE;
+ // Ignore start addresses >32 bits
+ if( startpg > (1 << 20) )
+ continue ;
+ // Clip lengths to 32-bit address space
+ if( startpg + pgcount > (1<<20) )
+ pgcount = (1<<20) - startpg;
+
while( startpg % 32 && pgcount ) {
gaPageBitmap[startpg/32] &= ~(1U << (startpg%32));
startpg ++;
}
else if( ent->Type == PMEMTYPE_USED )
{
+ // TODO: Clip?
giPhysAlloc += ent->Length / PAGE_SIZE;
}
}
// Fill Superpage bitmap
// - A set bit means that there are no free pages in this block of 32
- for( i = 0; i < (giPageCount+31)/32; i ++ )
+ for( Uint i = 0; i < (giPageCount+31)/32; i ++ )
{
if( gaPageBitmap[i] + 1 == 0 ) {
gaSuperBitmap[i/32] |= (1 << i%32);
// Release Spinlock
Mutex_Release( &glPhysAlloc );
-
- LEAVE('X', ret);
- if( ret == 0x17FFE000 )
- LogF("TRIP!\n");
+ LEAVE('P', ret);
+
#if TRACE_ALLOCS
if( now() > 4000 ) {
Log_Debug("PMem", "MM_AllocPhys: RETURN %P (%i free)", ret, giPageCount-giPhysAlloc);
{
if( MM_GetPhysAddr( &gaPageReferences[PAddr] ) == 0 )
{
- int i, base;
- tVAddr addr = ((tVAddr)&gaPageReferences[PAddr]) & ~0xFFF;
-// Log_Debug("PMem", "MM_RefPhys: Allocating info for %X", PAddr);
+ Uint base = PAddr & ~(1024-1);
Mutex_Release( &glPhysAlloc );
- if( MM_Allocate( addr ) == 0 ) {
+ // No infinite recursion, AllocPhys doesn't need the reference array
+ // TODO: Race condition? (racy on populating)
+ if( MM_Allocate( &gaPageReferences[base] ) == 0 )
+ {
Log_KernelPanic("PMem",
"MM_RefPhys: Out of physical memory allocating info for %X",
PAddr*PAGE_SIZE
);
+ for(;;);
}
Mutex_Acquire( &glPhysAlloc );
+ // TODO: Solve race condition. (see below)
+ // [1] See unallocated
+ // Release lock
+ // [2] Acquire lock
+ // See unallocated
+ // Release lock
+ // Allocate
+ // [1] Allocate
+ // Acquire lock
+ // Populate
+ // Release lock
+ // [2] Acquire lock
+ // Populate (clobbering)
- base = PAddr & ~(1024-1);
- for( i = 0; i < 1024; i ++ ) {
+ // Fill references from allocated bitmap
+ for( int i = 0; i < 1024; i ++ )
+ {
gaPageReferences[base + i] = (gaPageBitmap[(base+i)/32] & (1 << (base+i)%32)) ? 1 : 0;
}
}
/**
 * \brief Record an opaque node pointer against a physical page
 * \param PAddr	Physical address of the page (byte address; divided down to a page index internally)
 * \param Node	Opaque pointer stored in the per-page node table
 * \return 1 if the page is unreferenced (nothing stored), -1 if backing storage
 *         for the node table could not be allocated
 *         (success return is outside this view — presumably 0; confirm against full file)
 *
 * NOTE(review): resolved leftover diff/merge markers in this region toward the
 * "+" (new) side: the tVAddr block_addr locals were replaced by a single
 * page-aligned void* computed in one expression, matching the MM_Allocate(void*)
 * call convention used elsewhere in this patch.
 */
int MM_SetPageNode(tPAddr PAddr, void *Node)
{
	// A page with no references cannot carry a node
	if( MM_GetRefCount(PAddr) == 0 )	return 1;
	
	PAddr /= PAGE_SIZE;
	
	// Start of the node-table page that holds this entry
	void *page_ptr = (void*)( (tVAddr)&gaPageNodes[PAddr] & ~(PAGE_SIZE-1) );
	
	// Demand-allocate backing for this slice of the node table
	if( !MM_GetPhysAddr( page_ptr ) )
	{
		if( !MM_Allocate( page_ptr ) ) {
			Log_Warning("PMem", "Unable to allocate Node page");
			return -1;
		}
		// Fresh table page: no nodes recorded yet
		memset( page_ptr, 0, PAGE_SIZE );
	}
	
	gaPageNodes[PAddr] = Node;