#include <mm_virt.h>
#include <hal_proc.h>
+#define TRACE_MAPS 0
+
#define AP_KRW_ONLY 1 // Kernel page
#define AP_KRO_ONLY 5 // Kernel RO page
#define AP_RW_BOTH 3 // Standard RW
int MM_Map(tVAddr VAddr, tPAddr PAddr)
{
tMM_PageInfo pi = {0};
-// Log("MM_Map %P=>%p", PAddr, VAddr);
+ #if TRACE_MAPS
+ Log("MM_Map %P=>%p", PAddr, VAddr);
+ #endif
pi.PhysAddr = PAddr;
pi.Size = 12;
{
tPAddr table;
Uint32 *tmp_map;
- Uint32 *cur = (void*)MM_TABLE0USER;
+ Uint32 *cur = (void*)MM_TABLE1USER;
// Uint32 *cur = &FRACTAL(MM_TABLE1USER,0);
int i;
for( i = 0; i < 1024; i ++ )
{
- switch(cur[i] & 3)
+// Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]);
+ switch(cur[Table*256+i] & 3)
{
case 0: tmp_map[i] = 0; break;
case 1:
tmp_map[i] = 0;
- Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable");
+ Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (Table*256+i)*0x1000);
// Large page?
break;
case 2:
Uint32 *new_lvl1_1, *new_lvl1_2, *cur;
Uint32 *tmp_map;
int i;
+
+// MM_DumpTables(0, KERNEL_BASE);
ret = MM_AllocateRootTable();
tmp_map = new_lvl1_1;
for( i = 0; i < 0x800-4; i ++ )
{
-// Log("i = %i", i);
+ // HACK! Ignore the original identity mapping
+ if( i == 0 && Threads_GetTID() == 0 ) {
+ tmp_map[0] = 0;
+ continue;
+ }
if( i == 0x400 )
tmp_map = &new_lvl1_2[-0x400];
switch( cur[i] & 3 )
// Map the pages
for( i = 0; i < NPages; i ++ )
- MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAddr);
+ MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE);
// and return
LEAVE('p', ret);
return ret;
/**
 * \brief Allocate a contiguous block of physical pages for DMA use
 * \param Pages   Number of pages required
 * \param MaxBits Maximum physical address width the device can drive
 * \param PAddr   Out: physical base address of the allocated range
 * \return Virtual address of the mapped range, or 0 on failure
 *
 * NOTE(review): assumes MM_AllocPhysRange returns 0 when no suitable
 * range exists, and MM_MapHWPages maps \a Pages pages into HW space —
 * confirm against their definitions elsewhere in this file.
 */
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr)
{
	// Grab a physically-contiguous range below the 2^MaxBits limit
	tPAddr	phys = MM_AllocPhysRange(Pages, MaxBits);
	if( !phys )
	{
		Log_Warning("MMVirt", "No space left for a %i page block (MM_AllocDMA)", Pages);
		return 0;
	}
	
	// Map into the hardware-access window and report the physical base
	tVAddr	ret = MM_MapHWPages(phys, Pages);
	*PAddr = phys;
	return ret;
}
void MM_UnmapHWPages(tVAddr Vaddr, Uint Number)