From: John Hodge Date: Sun, 21 Aug 2011 08:32:41 +0000 (+0800) Subject: Kernel/arm7 - Working on ARM7 port X-Git-Tag: rel0.11~144 X-Git-Url: https://git.ucc.asn.au/?a=commitdiff_plain;h=7ca8dd27be34ef1a089e42b9b5518e64c9d4dd11;p=tpg%2Facess2.git Kernel/arm7 - Working on ARM7 port - PMM implemented - VMM on the way - Proc started, but nowhere near complete --- diff --git a/Kernel/arch/arm7/Makefile b/Kernel/arch/arm7/Makefile index e7102f00..ff9532df 100644 --- a/Kernel/arch/arm7/Makefile +++ b/Kernel/arch/arm7/Makefile @@ -21,3 +21,4 @@ CPPFLAGS += -DMMU_PRESENT=$(MMU_PRESENT) -DPCI_ADDRESS=$(PCI_ADDRESS) LDFLAGS += `$(CC) --print-libgcc-file-name` A_OBJ = start.ao main.o lib.o time.o pci.o +A_OBJ += mm_phys.o mm_virt.o proc.o diff --git a/Kernel/arch/arm7/include/arch.h b/Kernel/arch/arm7/include/arch.h index 3dccc09f..d4f81561 100644 --- a/Kernel/arch/arm7/include/arch.h +++ b/Kernel/arch/arm7/include/arch.h @@ -8,6 +8,7 @@ // === CONSTANTS === #define INVLPTR ((void*)-1) #define BITS 32 +#define PAGE_SIZE 0x1000 // === TYPES === typedef unsigned int Uint; @@ -33,5 +34,8 @@ typedef Uint32 tPAddr; extern void Debug_PutCharDebug(char Ch); extern void Debug_PutStringDebug(const char *String); +// This should be elsewhere, but CBF +extern void MM_SetupPhys(void); +extern int MM_InitialiseVirtual(void); #endif diff --git a/Kernel/arch/arm7/include/mm_virt.h b/Kernel/arch/arm7/include/mm_virt.h index 27bfbfde..872258be 100644 --- a/Kernel/arch/arm7/include/mm_virt.h +++ b/Kernel/arch/arm7/include/mm_virt.h @@ -26,8 +26,13 @@ #define MM_MODULE_MIN 0xC0000000 // - 0xD0000000 #define MM_MODULE_MAX 0xD0000000 -#define MM_KHEAP_MIN 0xE0000000 -#define MM_KHEAP_MAX 0xF0000000 +// PMM Data, giving it 128MiB is overkill, but it's unused atm +#define MM_MAXPHYSPAGE (1024*1024) +// 2^(32-12) max pages +// 8.125 bytes per page (for bitmap allocation) +// = 8.125 MiB +#define MM_PMM_BASE 0xE0000000 +#define MM_PMM_END 0xF0000000 #define MM_KERNEL_VFS 0xFF000000 // #define MM_TABLE1KERN 0xFF800000 // - 0x???????? 4MiB diff --git a/Kernel/arch/arm7/link.ld b/Kernel/arch/arm7/link.ld index 53e1a605..b16e7f63 100644 --- a/Kernel/arch/arm7/link.ld +++ b/Kernel/arch/arm7/link.ld @@ -1,9 +1,24 @@ ENTRY (_start) +_kernel_base = 0x80000000; + SECTIONS { - . = 0x80000000; - .text : { *(.text*) *(.rodata*) } - .data : { *(.data*) } - .bss : { *(.bss*) *(COMMON*) } + . = 0; + . 
+= _kernel_base; + .text : AT( ADDR(.text) - _kernel_base ) + { + *(.text*) + *(.rodata*) + } + .data : AT( ADDR(.text) - _kernel_base ) + { + *(.padata) + *(.data*) + } + .bss : AT( ADDR(.text) - _kernel_base ) + { + *(.bss*) + *(COMMON*) + } } diff --git a/Kernel/arch/arm7/main.c b/Kernel/arch/arm7/main.c index 891501d1..1e35c0ef 100644 --- a/Kernel/arch/arm7/main.c +++ b/Kernel/arch/arm7/main.c @@ -4,10 +4,10 @@ * ARM7 Entrypoint * arch/arm7/main.c */ +#include // === IMPORTS === extern void Interrupts_Setup(void); -extern void MM_SetupPhys(void); // === PROTOTYPES === int kmain(void); diff --git a/Kernel/arch/arm7/mm_phys.c b/Kernel/arch/arm7/mm_phys.c index 76a88809..d91f889f 100644 --- a/Kernel/arch/arm7/mm_phys.c +++ b/Kernel/arch/arm7/mm_phys.c @@ -4,3 +4,49 @@ * ARM7 Physical Memory Manager * arch/arm7/mm_phys.c */ +#include +#include + +#define MM_NUM_RANGES 1 // Single range +#define MM_RANGE_MAX 0 + +#define NUM_STATIC_ALLOC 4 + +char gStaticAllocPages[NUM_STATIC_ALLOC][PAGE_SIZE] __attribute__ ((section(".padata"))); +tPAddr gaiStaticAllocPages[NUM_STATIC_ALLOC] = { + (tPAddr)(&gStaticAllocPages[0] - KERNEL_BASE), + (tPAddr)(&gStaticAllocPages[1] - KERNEL_BASE), + (tPAddr)(&gStaticAllocPages[2] - KERNEL_BASE), + (tPAddr)(&gStaticAllocPages[3] - KERNEL_BASE) +}; +extern char gKernelEnd[]; + +#include + +void MM_SetupPhys(void) +{ + MM_Tpl_InitPhys( 16*1024*1024/0x1000, NULL ); +} + +int MM_int_GetMapEntry( void *Data, int Index, tPAddr *Start, tPAddr *Length ) +{ + switch(Index) + { + case 0: + *Start = ((tVAddr)&gKernelEnd - KERNEL_BASE + 0xFFF) & ~0xFFF; + *Length = 16*1024*1024; + return 1; + default: + return 0; + } +} + +/** + * \brief Takes a physical address and returns the ID of its range + * \param Addr Physical address of page + * \return Range ID from eMMPhys_Ranges + */ +int MM_int_GetRangeID( tPAddr Addr ) +{ + return MM_RANGE_MAX; // ARM doesn't need ranges +} diff --git a/Kernel/arch/arm7/mm_virt.c b/Kernel/arch/arm7/mm_virt.c index 218dc85e..78f05c62 100644 --- a/Kernel/arch/arm7/mm_virt.c +++ b/Kernel/arch/arm7/mm_virt.c @@ -4,41 +4,139 @@ * ARM7 Virtual Memory Manager * - arch/arm7/mm_virt.c */ +#include #include +#define AP_KRW_ONLY 0x1 +#define AP_KRO_ONLY 0x5 +#define AP_RW_BOTH 0x3 +#define AP_RO_BOTH 0x6 + // === TYPES === typedef struct { tPAddr PhysAddr; Uint8 Size; Uint8 Domain; + BOOL bExecutable; + BOOL bGlobal; + BOOL bShared; + int AP; } tMM_PageInfo; +// === PROTOTYPES === + int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); + int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); + // === GLOBALS === -Uint32 *gMM_KernelTable0 = (void*)MM_TABLE0KERN; -Uint32 *gMM_KernelTable1 = (void*)MM_TABLE1KERN; // === CODE === int MM_InitialiseVirtual(void) { + return 0; +} + +int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) +{ + Uint32 *table0, *table1; + Uint32 *desc; + + if(VAddr & 0x80000000 ) { + table0 = (void*)MM_TABLE0KERN; // Level 0 + table1 = (void*)MM_TABLE1KERN; // Level 1 + } + else { + table0 = (void*)MM_TABLE0USER; + table1 = (void*)MM_TABLE1USER; + } + VAddr &= 0x7FFFFFFF; + + desc = &table0[ VAddr >> 20 ]; + + switch(pi->Size) + { + case 12: // Small Page + case 16: // Large Page + if( (*desc & 3) == 0 ) { + if( pi->PhysAddr == 0 ) return 0; + // Allocate + *desc = MM_AllocPhys(); + *desc |= pi->Domain << 5; + *desc |= 1; + } + desc = &table1[ VAddr >> 12 ]; + if( pi->Size == 12 ) + { + // Small page + // - Error if overwriting a large page + if( (*desc & 3) == 1 ) return 1; + if( pi->PhysAddr == 0 ) { + *desc = 0; + return 0; + 
} + + *desc = (pi->PhysAddr & 0xFFFFF000) | 2; + if(!pi->bExecutable) *desc |= 1; // XN + if(!pi->bGlobal) *desc |= 1 << 11; // NG + if( pi->bShared) *desc |= 1 << 10; // S + *desc |= (pi->AP & 3) << 4; // AP + *desc |= ((pi->AP >> 2) & 1) << 9; // APX + } + else + { + // Large page + // TODO: + } + break; + case 20: // Section or unmapped + Log_Warning("MM", "TODO: Implement sections"); + break; + case 24: // Supersection + // Error if not aligned + if( VAddr & 0xFFFFFF ) { + return 1; + } + if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) ) + { + if( pi->PhysAddr == 0 ) { + *desc = 0; + // TODO: Apply to all entries + return 0; + } + // Apply + *desc = pi->PhysAddr & 0xFF000000; +// *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20; +// *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5; + *desc |= 2 | (1 << 18); + // TODO: Apply to all entries + return 0; + } + return 1; + } + + return 1; } int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) { - Uint32 *table0, table1; + Uint32 *table0, *table1; Uint32 desc; if(VAddr & 0x80000000 ) { - table0 = MM_TABLE0KERN; - table1 = MM_TABLE1KERN; + table0 = (void*)MM_TABLE0KERN; // Level 0 + table1 = (void*)MM_TABLE1KERN; // Level 1 } else { - table0 = MM_TABLE0USER; - table1 = MM_TABLE1USER; + table0 = (void*)MM_TABLE0USER; + table1 = (void*)MM_TABLE1USER; } - VAddr & 0x7FFFFFFF; + VAddr &= 0x7FFFFFFF; desc = table0[ VAddr >> 20 ]; + + pi->bExecutable = 1; + pi->bGlobal = 0; + pi->bShared = 0; switch( (desc & 3) ) { @@ -73,7 +171,7 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) pi->PhysAddr = desc & 0xFFFFF000; pi->bExecutable = desc & 1; pi->bGlobal = !(desc >> 11); - pi->bSharec = (desc >> 10) & 1; + pi->bShared = (desc >> 10) & 1; return 1; } return 1; @@ -83,8 +181,8 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) if( desc & (1 << 18) ) { // Supersection pi->PhysAddr = desc & 0xFF000000; - pi->PhysAddr |= ((desc >> 20) & 0xF) << 32; - pi->PhysAddr |= ((desc >> 5) & 0x7) << 36; + pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32; + pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36; pi->Size = 24; pi->Domain = 0; // Superpages default to zero return 0; @@ -103,6 +201,7 @@ int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) pi->Domain = 0; return 2; } + return 2; } // --- Exports --- @@ -113,3 +212,87 @@ tPAddr MM_GetPhysAddr(tVAddr VAddr) return 0; return pi.PhysAddr; } + +Uint MM_GetFlags(tVAddr VAddr) +{ + tMM_PageInfo pi; + int ret; + + if( MM_int_GetPageInfo(VAddr, &pi) ) + return 0; + + ret = 0; + + switch(pi.AP) + { + case AP_KRW_ONLY: + ret |= MM_PFLAG_KERNEL; + break; + case AP_KRO_ONLY: + ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO; + break; + case AP_RW_BOTH: + break; + case AP_RO_BOTH: + ret |= MM_PFLAG_RO; + break; + } + + if( pi.bExecutable ) ret |= MM_PFLAG_EXEC; + return ret; +} + +void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) +{ + tMM_PageInfo pi; + if( MM_int_GetPageInfo(VAddr, &pi) ) + return; + + + +} + +int MM_Map(tVAddr VAddr, tPAddr PAddr) +{ + tMM_PageInfo pi = {0}; + pi.PhysAddr = PAddr; + pi.Size = 12; + pi.AP = AP_KRW_ONLY; // Kernel Read/Write + pi.bExecutable = 1; + if( MM_int_SetPageInfo(VAddr, &pi) ) { + MM_DerefPhys(pi.PhysAddr); + return 0; + } + return pi.PhysAddr; +} + +tPAddr MM_Allocate(tVAddr VAddr) +{ + tMM_PageInfo pi = {0}; + + pi.PhysAddr = MM_AllocPhys(); + if( pi.PhysAddr == 0 ) return 0; + pi.Size = 12; + pi.AP = AP_KRW_ONLY; // Kernel Read/Write + pi.bExecutable = 1; + if( MM_int_SetPageInfo(VAddr, &pi) ) { + MM_DerefPhys(pi.PhysAddr); + return 0; + } + return 
pi.PhysAddr; +} + +void MM_Deallocate(tVAddr VAddr) +{ + tMM_PageInfo pi; + + if( MM_int_GetPageInfo(VAddr, &pi) ) return ; + + if( pi.PhysAddr == 0 ) return; + MM_DerefPhys(pi.PhysAddr); + + pi.PhysAddr = 0; + pi.AP = 0; + pi.bExecutable = 0; + MM_int_SetPageInfo(VAddr, &pi); +} diff --git a/Kernel/arch/arm7/proc.c b/Kernel/arch/arm7/proc.c new file mode 100644 index 00000000..d7775eec --- /dev/null +++ b/Kernel/arch/arm7/proc.c @@ -0,0 +1,24 @@ +/* + * Acess2 + * - By John Hodge (thePowersGang) + * + * arch/arm7/proc. + * - ARM7 Process Switching + */ +#include +#include + +// === PROTOTYPES === + +// === GLOBALS === +tThread *gpCurrentThread; + +// === CODE === +void Proc_Start(void) +{ +} + +tThread *Proc_GetCurThread(void) +{ + return gpCurrentThread; +} diff --git a/Kernel/arch/arm7/start.s b/Kernel/arch/arm7/start.s index 43468fc5..4fb371b4 100644 --- a/Kernel/arch/arm7/start.s +++ b/Kernel/arch/arm7/start.s @@ -16,7 +16,9 @@ _start: bl main 1: b 1b @ Infinite loop - SyscallHandler: +.section .padata +.globl kernel_table0 + diff --git a/Kernel/include/tpl_mm_phys_bitmap.h b/Kernel/include/tpl_mm_phys_bitmap.h new file mode 100644 index 00000000..76a38104 --- /dev/null +++ b/Kernel/include/tpl_mm_phys_bitmap.h @@ -0,0 +1,355 @@ +/* + * Acess2 Core + * + * include/tpl_mm_phys_bitmap.h + * Physical Memory Manager Template + */ +#define DEBUG 0 + +/* + * Bitmap Edition + * + * Uses 4.125+PtrSize bytes per page + */ + +#define MM_PAGE_REFCOUNTS MM_PMM_BASE +#define MM_PAGE_NODES (MM_PMM_BASE+(MM_MAXPHYSPAGE*sizeof(Uint32))) +#define MM_PAGE_BITMAP (MM_PAGE_NODES+(MM_MAXPHYSPAGE*sizeof(void*))) + +// === PROTOTYPES === +//void MM_InitPhys_Multiboot(tMBoot_Info *MBoot); +//tPAddr MM_AllocPhysRange(int Num, int Bits); +//tPAddr MM_AllocPhys(void); +//void MM_RefPhys(tPAddr PAddr); +//void MM_DerefPhys(tPAddr PAddr); + int MM_int_GetRangeID( tPAddr Addr ); + int MM_int_GetMapEntry( void *Data, int Index, tPAddr *Start, tPAddr *Length ); +void MM_Tpl_InitPhys(int MaxRAMPage, void *MemoryMap); + +// === GLOBALS === +tMutex glPhysicalPages; +void **gapPageNodes = (void*)MM_PAGE_NODES; //!< Associated VFS Node for each page +Uint32 *gaiPageReferences = (void*)MM_PAGE_REFCOUNTS; // Reference Counts +Uint32 *gaPageBitmaps = (void*)MM_PAGE_BITMAP; // Used bitmap (1 == avail) +Uint64 giMaxPhysPage = 0; // Maximum Physical page + int gbPMM_Init = 0; + int gaiPhysRangeFirstFree[MM_NUM_RANGES]; + int gaiPhysRangeLastFree[MM_NUM_RANGES]; + int gaiPhysRangeNumFree[MM_NUM_RANGES]; + +// === CODE === +/** + * \brief Initialise the physical memory manager with a passed memory map + */ +void MM_Tpl_InitPhys(int MaxRAMPage, void *MemoryMap) +{ + int mapIndex = 0; + tPAddr rangeStart, rangeLen; + + if( MM_PAGE_BITMAP + (MM_MAXPHYSPAGE/8) > MM_PMM_END ) { + Log_KernelPanic("PMM", "Config Error, PMM cannot fit data in allocated range"); + } + + giMaxPhysPage = MaxRAMPage; + + while( MM_int_GetMapEntry(MemoryMap, mapIndex++, &rangeStart, &rangeLen) ) + { + tVAddr bitmap_page; + rangeStart /= PAGE_SIZE; + rangeLen /= PAGE_SIZE; + + bitmap_page = (tVAddr)&gaPageBitmaps[rangeStart/32]; + bitmap_page &= ~(PAGE_SIZE-1); + + // Only need to allocate bitmaps + if( !MM_GetPhysAddr( bitmap_page ) ) { + if( MM_Allocate( bitmap_page ) ) { + Log_KernelPanic("PMM", "Out of memory during init, this is bad"); + return ; + } + memset( (void*)bitmap_page, 0, rangeStart/8 & ~(PAGE_SIZE-1) ); + } + + // Align to 32 pages + for( ; (rangeStart & 31) && rangeLen > 0; rangeStart++, rangeLen-- ) { + gaPageBitmaps[rangeStart / 32] |= 1 
<< (rangeStart&31); + } + // Mark blocks of 32 as avail + for( ; rangeLen > 31; rangeStart += 32, rangeLen -= 32 ) { + gaPageBitmaps[rangeStart / 32] = -1; + } + // Mark the tail + for( ; rangeLen > 0; rangeStart ++, rangeLen -- ) { + gaPageBitmaps[rangeStart / 32] |= 1 << (rangeStart&31); + } + } + + gbPMM_Init = 1; + + LEAVE('-'); +} + +/** + * \brief Allocate a contiguous range of physical pages with a maximum + * bit size of \a MaxBits + * \param Pages Number of pages to allocate + * \param MaxBits Maximum size of the physical address + * \note If \a MaxBits is <= 0, any sized address is used (with preference + * to higher addresses) + */ +tPAddr MM_AllocPhysRange(int Pages, int MaxBits) +{ + tPAddr addr, ret; + int rangeID; + int nFree = 0, i; + + ENTER("iPages iBits", Pages, MaxBits); + + // Get range ID + if( MaxBits <= 0 || MaxBits >= 64 ) // Speedup for the common case + rangeID = MM_RANGE_MAX; + else + rangeID = MM_int_GetRangeID( (1LL << MaxBits) - 1 ); + + Mutex_Acquire(&glPhysicalPages); + + // Check if the range actually has any free pages + while(gaiPhysRangeNumFree[rangeID] == 0 && rangeID) + rangeID --; + + LOG("rangeID = %i", rangeID); + + // Check if there is enough in the range + if(gaiPhysRangeNumFree[rangeID] >= Pages) + { + LOG("{%i,0x%x -> 0x%x}", + giPhysRangeFree[rangeID], + giPhysRangeFirst[rangeID], giPhysRangeLast[rangeID] + ); + // Do a cheap scan, scanning upwards from the first free page in + // the range + nFree = 0; + addr = gaiPhysRangeFirstFree[ rangeID ]; + while( addr <= gaiPhysRangeLastFree[ rangeID ] ) + { + #if USE_SUPER_BITMAP + // Check the super bitmap + if( gaSuperBitmap[addr / (32*32)] == 0 ) + { + LOG("nFree = %i = 0 (super) (0x%x)", nFree, addr); + nFree = 0; + addr += 1LL << (6+6); + addr &= ~0xFFF; // (1LL << 6+6) - 1 + continue; + } + #endif + // Check page block (32 pages) + if( gaPageBitmaps[addr / 32] == 0) { + LOG("nFree = %i = 0 (main) (0x%x)", nFree, addr); + nFree = 0; + addr += 1LL << (6); + addr &= ~0x3F; + continue; + } + // Check individual page + if( !(gaPageBitmaps[addr / 32] & (1LL << (addr & 31))) ) + { + LOG("nFree = %i = 0 (page) (0x%x)", nFree, addr); + nFree = 0; + addr ++; + continue; + } + nFree ++; + addr ++; + LOG("nFree(%i) == %i (0x%x)", nFree, Pages, addr); + if(nFree == Pages) + break; + } + LOG("nFree = %i", nFree); + // If we don't find a contiguous block, nFree will not be equal + // to Num, so we set it to zero and do the expensive lookup. + if(nFree != Pages) nFree = 0; + } + + if( !nFree ) + { + // Oops. ok, let's do an expensive check (scan down the list + // until a free range is found) + nFree = 1; + addr = gaiPhysRangeLastFree[ rangeID ]; + // TODO + Mutex_Release(&glPhysicalPages); + // TODO: Page out + // ATM. 
Just Warning + Warning(" MM_AllocPhysRange: Out of memory (unable to fulfil request for %i pages)", Pages); + Log_Warning("PMM", + "Out of memory (unable to fulfil request for %i pages)", + Pages + ); + LEAVE('i', 0); + return 0; + } + LOG("nFree = %i, addr = 0x%08x", nFree, addr); + + // Mark pages as allocated + addr -= Pages; + for( i = 0; i < Pages; i++, addr++ ) + { + // Mark as used + gaPageBitmaps[addr / 32] &= ~(1 << (addr & 31)); + // Maintain first possible free + rangeID = MM_int_GetRangeID(addr * PAGE_SIZE); + gaiPhysRangeNumFree[ rangeID ] --; + if(addr == gaiPhysRangeFirstFree[ rangeID ]) + gaiPhysRangeFirstFree[ rangeID ] += 1; + + // Mark as referenced if the reference count page is valid + if(MM_GetPhysAddr( (tVAddr)&gaiPageReferences[addr] )) { + gaiPageReferences[addr] = 1; + } + } + ret = addr; // Save the return address + + #if USE_SUPER_BITMAP + // Update super bitmap + Pages += addr & (32-1); + addr &= ~(32-1); + Pages = (Pages + (32-1)) & ~(32-1); + for( i = 0; i < Pages/32; i++ ) + { + if( gaPageBitmaps[ addr / 32 ] + 1 == 0 ) + gaSuperBitmap[addr / (32*32)] |= 1LL << ((addr / 32) & 31); + } + #endif + + Mutex_Release(&glPhysicalPages); + LEAVE('x', ret << 12); + return ret << 12; +} + +/** + * \brief Allocate a single physical page, with no preference as to address size. + */ +tPAddr MM_AllocPhys(void) +{ + int i; + + if( !gbPMM_Init ) + { + // Hack to allow allocation during setup + for(i = 0; i < NUM_STATIC_ALLOC; i++) { + if( gaiStaticAllocPages[i] ) { + tPAddr ret = gaiStaticAllocPages[i]; + gaiStaticAllocPages[i] = 0; + Log("MM_AllocPhys: Return %x, static alloc %i", ret, i); + return ret; + } + } + + tPAddr ret = 0; + for( ret = 0; ret < giMaxPhysPage; ret ++ ) + { + if( !MM_GetPhysAddr( (tVAddr)&gaPageBitmaps[ret/32] ) ) { + ret += PAGE_SIZE*8; + continue ; + } + if( gaPageBitmaps[ret/32] == 0 ) { + ret += 32-1; + continue ; + } + if( gaPageBitmaps[ret/32] & (1 << (ret&31)) ) { + gaPageBitmaps[ret/32] &= ~(1 << (ret&31)); + return ret * PAGE_SIZE; + } + } + Log_Error("PMM", "MM_AllocPhys failed duing init"); + return 0; + } + + return MM_AllocPhysRange(1, -1); +} + +/** + * \brief Reference a physical page + */ +void MM_RefPhys(tPAddr PAddr) +{ + tPAddr page = PAddr / PAGE_SIZE; + + if( page >= giMaxPhysPage ) return ; + + if( gaPageBitmaps[ page / 32 ] & (1LL << (page&31)) ) + { + // Allocate + gaPageBitmaps[page / 32] &= ~(1LL << (page&31)); + #if USE_SUPER_BITMAP + if( gaPageBitmaps[page / 32] == 0 ) + gaSuperBitmap[page / (32*32)] &= ~(1LL << ((page / 32) & 31)); + #endif + } + else + { + tVAddr refpage = (tVAddr)&gaiPageReferences[page] & ~(PAGE_SIZE-1); + // Reference again + if( !MM_GetPhysAddr( refpage ) ) + { + if( MM_Allocate(refpage) == 0 ) { + // Out of memory, can this be resolved? 
+ // TODO: Reclaim memory + Log_Error("PMM", "Out of memory (MM_RefPhys)"); + return ; + } + memset((void*)refpage, 0, PAGE_SIZE); + gaiPageReferences[page] = 2; + } + else + gaiPageReferences[ page ] ++; + } +} + +/** + * \brief Dereference a physical page + */ +void MM_DerefPhys(tPAddr PAddr) +{ + Uint64 page = PAddr >> 12; + + if( PAddr >> 12 > giMaxPhysPage ) return ; + + if( MM_GetPhysAddr( (tVAddr)&gaiPageReferences[page] ) ) + { + if( gaiPageReferences[page] > 0 ) + gaiPageReferences[ page ] --; + if( gaiPageReferences[ page ] == 0 ) { + gaPageBitmaps[ page / 32 ] |= 1 << (page&31); + // TODO: Catch when all pages in this range have been dereferenced + } + } + else + gaPageBitmaps[ page / 32 ] |= 1 << (page&31); + // Clear node if needed + if( MM_GetPhysAddr( (tVAddr)&gapPageNodes[page] ) ) { + gapPageNodes[page] = NULL; + // TODO: Catch when all pages in this range are not using nodes + } + + // Update the free counts if the page was freed + if( gaPageBitmaps[ page / 32 ] & (1LL << (page&31)) ) + { + int rangeID; + rangeID = MM_int_GetRangeID( PAddr ); + gaiPhysRangeNumFree[ rangeID ] ++; + if( gaiPhysRangeFirstFree[rangeID] > page ) + gaiPhysRangeFirstFree[rangeID] = page; + if( gaiPhysRangeLastFree[rangeID] < page ) + gaiPhysRangeLastFree[rangeID] = page; + } + + #if USE_SUPER_BITMAP + // If the bitmap entry is not zero, set the bit free in the super bitmap + if(gaPageBitmaps[ page / 32 ] != 0 ) { + gaSuperBitmap[page / (32*32)] |= 1LL << ((page / 32) & 31); + } + #endif +} + diff --git a/Makefile.arm7.cfg b/Makefile.arm7.cfg new file mode 100644 index 00000000..c3682b5c --- /dev/null +++ b/Makefile.arm7.cfg @@ -0,0 +1,7 @@ + +CC = arm-elf-gcc +AS = arm-elf-as +LD = arm-elf-ld +OBJDUMP = arm-elf-objdump +DISASM = $(OBJDUMP) -d -S +ARCHDIR = arm7
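
Note on the page-table encoding: the small-page case in MM_int_SetPageInfo above packs the mapping flags into an ARMv6/v7 short-descriptor second-level ("small page") entry. The following is a minimal standalone sketch of that encoding, not code from this patch; the helper name, constants and standalone types are invented for illustration.

#include <stdint.h>

/* Bit positions in a short-descriptor small-page (second-level) entry */
#define SP_TYPE       (1u << 1)    /* bits[1:0] = 1x -> small page */
#define SP_XN         (1u << 0)    /* Execute Never */
#define SP_AP_SHIFT   4            /* AP[1:0] live at bits [5:4] */
#define SP_APX        (1u << 9)    /* AP[2] ("APX") */
#define SP_SHARED     (1u << 10)   /* S */
#define SP_NOTGLOBAL  (1u << 11)   /* nG */

/* Hypothetical helper mirroring the flag packing done by MM_int_SetPageInfo */
static uint32_t make_small_page_desc(uint32_t phys, int ap,
                                     int executable, int global, int shared)
{
	uint32_t desc = (phys & 0xFFFFF000u) | SP_TYPE;

	if( !executable ) desc |= SP_XN;        /* non-executable mapping */
	if( !global )     desc |= SP_NOTGLOBAL; /* per-process (ASID-tagged) mapping */
	if( shared )      desc |= SP_SHARED;
	desc |= (uint32_t)(ap & 3) << SP_AP_SHIFT;  /* AP[1:0] */
	if( (ap >> 2) & 1 ) desc |= SP_APX;         /* AP[2] */

	return desc;
}

With an AP value of 0x1 (AP_KRW_ONLY in mm_virt.c) and executable set, this yields a privileged read/write, user no-access, executable mapping, which is what MM_Map and MM_Allocate request.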
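
Note on the physical allocator: tpl_mm_phys_bitmap.h above tracks physical pages with one bit per page, a set bit meaning the page is free; allocation scans for a set bit, clears it and returns that page's address, and dereferencing a page back to zero references sets its bit again. Below is a condensed sketch of that core idea only, with invented names, a fixed 16 MiB of RAM, and none of the patch's locking, reference counts, per-page VFS nodes or range tracking.

#include <stdint.h>

#define PAGE_SIZE  0x1000
#define MAX_PAGES  (16*1024*1024 / PAGE_SIZE)   /* 16 MiB, as passed to MM_Tpl_InitPhys */

static uint32_t page_bitmap[MAX_PAGES / 32];    /* 1 bit per page, 1 == free */

/* Mark a run of pages as free; called once per usable RAM range at init */
static void bitmap_mark_free(unsigned first_page, unsigned count)
{
	for( ; count > 0; first_page++, count-- )
		page_bitmap[first_page / 32] |= 1u << (first_page & 31);
}

/* Allocate one page: find a set bit, clear it, return the physical address */
static uint64_t bitmap_alloc_page(void)
{
	for( unsigned word = 0; word < MAX_PAGES / 32; word++ )
	{
		if( page_bitmap[word] == 0 )
			continue;                /* all 32 pages in this block are in use */
		for( unsigned bit = 0; bit < 32; bit++ )
		{
			if( page_bitmap[word] & (1u << bit) ) {
				page_bitmap[word] &= ~(1u << bit);   /* mark used */
				return (uint64_t)(word * 32 + bit) * PAGE_SIZE;
			}
		}
	}
	return 0;   /* out of free pages; 0 doubles as the failure value, as in the patch */
}

/* Free one page: set its bit again */
static void bitmap_free_page(uint64_t paddr)
{
	unsigned page = (unsigned)(paddr / PAGE_SIZE);
	if( page < MAX_PAGES )
		page_bitmap[page / 32] |= 1u << (page & 31);
}

In the patch, MM_Tpl_InitPhys plays the role of bitmap_mark_free: it walks the memory map returned by MM_int_GetMapEntry, marks each usable range free, and additionally allocates the pages backing the bitmap itself on demand.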