3 * - By John Hodge (thePowersGang)
13 #include <mm_virt.h> // MM_USER_MAX
15 #define MMAP_PAGES_PER_BLOCK 16
// One node's cache of file-backed physical pages, kept as a sorted,
// singly-linked list of fixed-size blocks (see VFS_MMap's search loop).
18 typedef struct sVFS_MMapPageBlock tVFS_MMapPageBlock;
19 struct sVFS_MMapPageBlock
21 	tVFS_MMapPageBlock *Next;	// Next block in ascending BaseOffset order
	// First page index covered by this block, in PAGES (not bytes) — it is
	// compared directly against `pagenum = Offset / PAGE_SIZE` in VFS_MMap.
	// Must be a multiple of MMAP_PAGES_PER_BLOCK.
22 	Uint64 BaseOffset;
23 	tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK];	// 0 = page not yet cached
// === PROTOTYPES ===
// NOTE(review): the prototype names this parameter `PageNum` while the
// definition below uses `pagenum` — harmless, but worth unifying.
27 //void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
28 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
29 int VFS_MMap_MapPage(tVFS_Node *Node, unsigned int PageNum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection);
30 //int VFS_MUnmap(void *Addr, size_t Length);
31 bool _range_free(const tPage *Base, Uint NumPages);
/**
 * \brief Map a file (or anonymous memory) into the current address space
 * \param DestHint    Preferred destination address (mandatory when MMAP_MAP_FIXED is set)
 * \param Length      Length of the mapping in bytes
 * \param Protection  MMAP_PROT_* flags (write/exec applied per-page below)
 * \param Flags       MMAP_MAP_* flags (FIXED / ANONYMOUS / PRIVATE)
 * \param FD          File handle to map (ignored for anonymous mappings)
 * \param Offset      Byte offset into the file
 * \return Base address of the mapping, or NULL on error
 *
 * NOTE(review): several original lines (closing braces, else arms, the
 * anonymous-path return, and the new-block list insertion) are not visible
 * in this view; comments describe only the code that is shown.
 */
34 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
36 	ENTER("pDestHint xLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
	// Anonymous maps have no file offset; reuse the hint's sub-page offset
	// so the returned pointer keeps the caller's alignment.
38 	if( Flags & MMAP_MAP_ANONYMOUS )
39 		Offset = (tVAddr)DestHint & 0xFFF;
	// Page count covers both the leading sub-page offset and the tail.
41 	unsigned int npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
42 	unsigned int pagenum = Offset / PAGE_SIZE;	// First file page index
43 	LOG("npages=%u,pagenum=%u", npages, pagenum);
45 	tVAddr mapping_base = (tVAddr)DestHint;
47 	if( Flags & MMAP_MAP_FIXED )
	{
		// Fixed mapping: the hint is the address, and must be non-NULL.
49 		ASSERT( (Flags & MMAP_MAP_FIXED) && DestHint != NULL );
50 		// Keep and use the hint
51 		// - TODO: Validate that the region pointed to by the hint is correct
	}
55 		Log_Warning("VFS", "MMap: TODO Handle non-fixed mappings");
57 		// Locate a free location in the address space (between brk and MM_USER_MAX)
58 		// TODO: Prefer first location after DestHint, but can go below
60 		// Search downwards from the top of user memory
		// (stops above page 0 so NULL is never handed out)
62 		for( tPage *dst = (tPage*)MM_USER_MAX - npages; dst > (tPage*)PAGE_SIZE; dst -- )
64 			if( _range_free(dst, npages) ) {
65 				mapping_base = (tVAddr)dst;
		// mapping_base is still 0 when no free range was found above.
69 		if( mapping_base == 0 )
71 			Log_Warning("VFS", "MMap: Out of address space");
	// Round the base down to a page boundary for the per-page loop.
77 	tPage	*mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
79 	if( !_range_free(mapping_dest, npages) )
81 		LOG("Specified range is not free");
85 		Log_Warning("VFS", "MMap: Overwriting/replacing maps at %p+%x", mapping_base, Length);
88 	// Handle anonymous mappings
89 	if( Flags & MMAP_MAP_ANONYMOUS )
91 		// TODO: Convert \a Protection into a flag set
92 		void	*ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
	// --- File-backed mapping from here on ---
96 	tVFS_Handle	*h = VFS_GetHandle(FD);
97 	if( !h || !h->Node )	LEAVE_RET('n', NULL);
	// Serialise against other mappers of this node's page cache.
101 	Mutex_Acquire( &h->Node->Lock );
103 	tVFS_MMapPageBlock	*pb, **pb_pnp = (tVFS_MMapPageBlock**)&h->Node->MMapInfo;
104 	// Search for existing mapping for each page
105 	// - Sorted list of 16 page blocks
	// Stops at the first block whose range ends after pagenum; pb_pnp
	// tracks the link to patch when inserting a new block.
106 	for( pb = h->Node->MMapInfo; pb; pb_pnp = &pb->Next, pb = pb->Next )
108 		if( pb->BaseOffset + MMAP_PAGES_PER_BLOCK > pagenum )
112 	LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
114 	// - Allocate a block if needed
115 	if( !pb || pb->BaseOffset > pagenum )
	{
		// calloc zeroes PhysAddrs, marking every page as "not cached yet".
118 		pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
		{
			// Allocation failed: unlock and bail out.
120 			Mutex_Release( &h->Node->Lock );
121 			LEAVE_RET('n', NULL);
		}
		// Align the block's first page down to a block boundary (page units).
124 		pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
	}
128 	// - Map (and allocate) pages
131 		ASSERTC( pagenum, >=, pb->BaseOffset );
132 		ASSERTC( pagenum - pb->BaseOffset, <, MMAP_PAGES_PER_BLOCK );
		// Only map pages that aren't already present at the destination.
133 		if( MM_GetPhysAddr( mapping_dest ) == 0 )
		{
135 			LOG("Map page to %p", mapping_dest);
136 			if( VFS_MMap_MapPage(h->Node, pagenum, pb, mapping_dest, Protection) )
			{
138 				Mutex_Release( &h->Node->Lock );
			}
		}
		// Page already present: just reconcile its protection flags.
145 			LOG("Flag update on %p", mapping_dest);
			// Existing read-only page but caller wants write access: clear RO.
146 			if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
			{
148 				MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
			}
		// Private maps get copy-on-write so writes don't reach the file cache.
151 		if( Flags & MMAP_MAP_PRIVATE ) {
152 			// TODO: Don't allow the page to change underneath either
153 			MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
		}
158 		// Roll on to next block if needed
		// (pagenum has been advanced past the end of the current block)
159 		if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
		{
			// No successor block, or it doesn't start where we need: insert one.
161 			if( !pb->Next || pb->Next->BaseOffset != pagenum )
			{
163 				if( pb->Next )	ASSERTC(pb->Next->BaseOffset % MMAP_PAGES_PER_BLOCK, ==, 0);
				// NOTE(review): this malloc's return value is not checked
				// (unlike the calloc above) — confirm and handle failure.
164 				tVFS_MMapPageBlock	*newpb = malloc( sizeof(tVFS_MMapPageBlock) );
165 				newpb->Next = pb->Next;
166 				newpb->BaseOffset = pagenum;
167 				memset(newpb->PhysAddrs, 0, sizeof(newpb->PhysAddrs));
			}
		}
175 	Mutex_Release( &h->Node->Lock );
177 	LEAVE('p', mapping_base);
178 	return (void*)mapping_base;
/**
 * \brief Create an anonymous (zero-filled) mapping at a fixed destination
 * \param Destination  Target address (may be mid-page; the sub-page offset is honoured)
 * \param Length       Number of bytes to map/zero
 * \param FlagsSet     MM_PFLAG_* bits to set on already-present leading page
 * \param FlagsMask    Mask of MM_PFLAG_* bits to update
 * \return Presumably Destination — the return statement is elided from this view; confirm
 *
 * Handles three regions in order: a partial leading page, whole pages
 * (replaced with copy-on-write zero pages), and a partial trailing page.
 * NOTE(review): loop-advance and early-return lines are not visible here.
 */
181 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
183 	size_t	ofs = (tVAddr)Destination & (PAGE_SIZE-1);	// Sub-page offset
184 	tPage	*mapping_dest = (void*)( (char*)Destination - ofs );	// Page-aligned base
	// Bytes belonging to the (possibly partial) first page.
188 		size_t	bytes = MIN(PAGE_SIZE - ofs, Length);
190 		// Allocate a partial page
191 		if( MM_GetPhysAddr(mapping_dest) )
		{
193 			// Already allocated page, clear the area we're touching
194 			ASSERT( ofs + bytes <= PAGE_SIZE );
196 			// TODO: Double check that this area isn't already zero
197 			memset( Destination, 0, bytes );
199 			MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
201 			LOG("#1: Clear %i from %p", Length, Destination);
		}
		// Not yet mapped: back the first page with fresh zeroed memory.
205 			MM_AllocateZero(mapping_dest);
206 			LOG("#1: Allocate for %p", Destination);
	// Middle region: whole pages only.
211 	while( Length >= PAGE_SIZE )
213 		if( MM_GetPhysAddr( mapping_dest ) )
		{
215 			// We're allocating entire pages here, so free this page and replace with a COW zero
216 			MM_Deallocate(mapping_dest);
217 			LOG("Replace %p with zero page", mapping_dest);
		}
221 			LOG("Allocate zero at %p", mapping_dest);
223 		MM_AllocateZero(mapping_dest);
	// Trailing partial page (if any bytes remain).
230 		ASSERT(Length < PAGE_SIZE);
233 		if( MM_GetPhysAddr(mapping_dest) )
		{
235 			// TODO: Don't touch page if already zero
236 			memset( mapping_dest, 0, Length );
237 			LOG("Clear %i in %p", Length, mapping_dest);
		}
241 			MM_AllocateZero(mapping_dest);
242 			LOG("Anon map to %p", mapping_dest);
/**
 * \brief Map a single file page to \a mapping_dest, populating the node's page cache
 * \param Node         File node being mapped
 * \param pagenum      File page index (file offset / PAGE_SIZE)
 * \param pb           Page-cache block covering \a pagenum (BaseOffset <= pagenum)
 * \param mapping_dest Page-aligned destination virtual address
 * \param Protection   MMAP_PROT_* flags, applied to the destination page
 * \return 0 on success, non-zero on failure (per the caller's check in VFS_MMap);
 *         the return statements themselves are elided from this view — confirm
 *
 * Fast path: if the physical page is already cached in \a pb, map and
 * reference it. Otherwise use the node type's MMap method if it has one,
 * else allocate a fresh page, read the file contents into it, and cache it.
 * NOTE(review): else-arms and closing braces are not visible in this view.
 */
249 int VFS_MMap_MapPage(tVFS_Node *Node, unsigned int pagenum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection)
	// Cached physical page: just map it and take a reference.
251 	if( pb->PhysAddrs[pagenum - pb->BaseOffset] != 0 )
	{
253 		MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
254 		MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
255 		LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
256 			pb->PhysAddrs[pagenum - pb->BaseOffset]);
	}
260 		tVFS_NodeType	*nt = Node->Type;
		// Node type provides its own MMap method (e.g. device memory).
266 			nt->MMap(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
270 			// Allocate pages and read data
			// MM_Allocate returning 0 = out of physical memory.
271 			if( MM_Allocate(mapping_dest) == 0 ) {
			}
275 			// TODO: Clip read length
276 			read_len = nt->Read(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest, 0);
277 			// TODO: This was commented out, why?
			// Short read (e.g. EOF mid-page): zero-fill the remainder.
278 			if( read_len != PAGE_SIZE ) {
279 				memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
			}
			// Cache the new physical page and tie it back to the node.
282 			pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
283 			MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], Node );
284 			MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
285 			LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
286 				pb->PhysAddrs[pagenum - pb->BaseOffset]);
		// The node is now referenced by this mapping.
289 		Node->ReferenceCount ++;
	// Apply protection: RO unless PROT_WRITE requested...
292 	if( !(Protection & MMAP_PROT_WRITE) ) {
293 		MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
	}
296 		MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
	// ...and EXEC only when PROT_EXEC requested.
299 	if( Protection & MMAP_PROT_EXEC ) {
300 		MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
	}
303 		MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
309 int VFS_MUnmap(void *Addr, size_t Length)
/**
 * \brief Check that no page in [Base, Base+NumPages) is currently mapped
 * \param Base      First page to test (page-aligned)
 * \param NumPages  Number of pages to test
 * \return Presumably true iff every page has no physical backing — the
 *         return statements are elided from this view; confirm
 * NOTE(review): `int i` is compared against the unsigned `NumPages`;
 * consider making the index `Uint` to avoid a signed/unsigned comparison.
 */
315 bool _range_free(const tPage *Base, Uint NumPages)
317 	for( int i = 0; i < NumPages; i ++ )
		// Any present physical page means the range is occupied.
319 		if( MM_GetPhysAddr(Base + i) )