3 * - Memory-mapped file support (VFS_MMap / VFS_MUnmap)
// Number of page slots cached per list node (see struct below).
11 #define MMAP_PAGES_PER_BLOCK 16
// Linked-list node caching the physical addresses of a run of file pages.
// Nodes are kept in a sorted singly-linked list hanging off the VFS node
// (h->Node->MMapInfo); a zero entry in PhysAddrs means "not yet mapped/read".
// NOTE(review): this listing is an excerpt — braces/fields between the
// numbered lines are elided from view.
14 typedef struct sVFS_MMapPageBlock tVFS_MMapPageBlock;
15 struct sVFS_MMapPageBlock
17 tVFS_MMapPageBlock *Next;	// Next block in sorted order (NULL at tail)
18 Uint64 BaseOffset; // Must be a multiple of MMAP_PAGES_PER_BLOCK*PAGE_SIZE
19 tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK];	// 0 = page slot unused
// Map a file (or anonymous memory) into the current address space.
// \param DestHint   Suggested virtual address (currently used as-is; see TODO)
// \param Length     Byte length of the mapping
// \param Protection MMAP_PROT_* flags (only PROT_WRITE is consulted here)
// \param Flags      MMAP_MAP_* flags (ANONYMOUS and PRIVATE are consulted)
// \param FD         File descriptor (ignored for anonymous mappings)
// \param Offset     Byte offset into the file
// \return Base virtual address of the mapping, or NULL on bad handle
// NOTE(review): this listing is an excerpt of the original file — many lines
// (braces, else branches, error-path bodies) are elided between the numbered
// lines below.
23 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
26 tVAddr mapping_dest, mapping_base;
28 tVFS_MMapPageBlock *pb, *prev;
30 ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
// Page count includes the sub-page offset so a straddling range rounds up.
32 npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
33 pagenum = Offset / PAGE_SIZE;
// DestHint is taken verbatim; only its page-aligned base is mapped.
35 mapping_base = (tVAddr)DestHint;
36 mapping_dest = mapping_base & ~(PAGE_SIZE-1);
38 // TODO: Locate space for the allocation
40 // Handle anonymous mappings
41 if( Flags & MMAP_MAP_ANONYMOUS )
43 for( ; npages --; mapping_dest += PAGE_SIZE )
// Page already present: leave it (COW handling is still a TODO).
45 if( MM_GetPhysAddr(mapping_dest) ) {
46 // TODO: Set flags to COW if needed (well, if shared)
49 if( !MM_Allocate(mapping_dest) ) {
51 Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest);
53 LOG("Anon map to %p", mapping_dest);
56 LEAVE_RET('p', (void*)mapping_base);
// File-backed mapping from here on.
59 h = VFS_GetHandle(FD);
60 if( !h || !h->Node ) LEAVE_RET('n', NULL);
// Node lock guards the MMapInfo list and the page-population below.
64 Mutex_Acquire( &h->Node->Lock );
66 // Search for existing mapping for each page
67 // - Sorted list of 16 page blocks
69 pb = h->Node->MMapInfo, prev = NULL;
// NOTE(review): boundary looks suspicious — when pagenum equals
// BaseOffset + MMAP_PAGES_PER_BLOCK this condition is false, so the loop
// stops at the preceding (full) block; confirm `<` vs `<=` is intended.
70 pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK < pagenum;
71 prev = pb, pb = pb->Next
74 LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
76 // - Allocate a block if needed
77 if( !pb || pb->BaseOffset > pagenum )
// NOTE(review): no malloc NULL check visible here — it may live in the
// elided lines (the Mutex_Release below suggests an error path exists).
80 pb = malloc( sizeof(tVFS_MMapPageBlock) );
82 Mutex_Release( &h->Node->Lock );
// Round BaseOffset down to the block granularity.
86 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
87 memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
91 h->Node->MMapInfo = pb;
94 // - Map (and allocate) pages
// Only touch pages not already present in this address space.
97 if( MM_GetPhysAddr(mapping_dest) == 0 )
// Cache miss: either ask the driver to map, or read the data in.
99 if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
// Driver-provided MMap callback, if the node has one (elided branch).
102 h->Node->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
105 // Allocate pages and read data
106 if( MM_Allocate(mapping_dest) == 0 ) {
108 Mutex_Release( &h->Node->Lock );
// NOTE(review): Read() return value is not checked in the visible lines.
112 h->Node->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
// Remember the physical page and tag it with the owning node so the
// MM layer can write it back / release it later.
114 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
115 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
116 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
117 pb->PhysAddrs[pagenum - pb->BaseOffset]);
// Cache hit: just map the already-known physical page.
121 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
122 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
123 pb->PhysAddrs[pagenum - pb->BaseOffset]);
// Each mapped page holds a reference on the node.
125 h->Node->ReferenceCount ++;
// Apply read-only protection for non-writable mappings.
128 if( !(Protection & MMAP_PROT_WRITE) ) {
129 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
132 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
// Page was already present: only adjust flags.
137 LOG("Flag update on %p", mapping_dest);
138 if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
140 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
// Private mappings become copy-on-write.
143 if( Flags & MMAP_MAP_PRIVATE )
144 MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
146 mapping_dest += PAGE_SIZE;
148 // Roll on to next block if needed
149 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
// Reuse the adjacent block if it is exactly the next run of pages...
151 if( pb->Next && pb->Next->BaseOffset == pagenum )
// ...otherwise insert a fresh block after the current one.
155 tVFS_MMapPageBlock *oldpb = pb;
// NOTE(review): malloc NULL check not visible here either — confirm
// it exists in the elided lines.
156 pb = malloc( sizeof(tVFS_MMapPageBlock) );
157 pb->Next = oldpb->Next;
158 pb->BaseOffset = pagenum;
159 memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
166 Mutex_Release( &h->Node->Lock );
// NOTE(review): unlike line 56, mapping_base is not cast to void* for the
// 'p' format here — harmless if tVAddr is pointer-sized, but inconsistent.
168 LEAVE('p', mapping_base);
169 return (void*)mapping_base;
172 int VFS_MUnmap(void *Addr, size_t Length)