3 * - By John Hodge (thePowersGang)
14 #define MMAP_PAGES_PER_BLOCK 16
// Node in a sorted singly-linked list that caches the physical pages backing
// one 16-page window of a file's mmap data (list head lives on the VFS node's MMapInfo)
17 typedef struct sVFS_MMapPageBlock tVFS_MMapPageBlock;
18 struct sVFS_MMapPageBlock
20 tVFS_MMapPageBlock *Next;	// Next block, in ascending BaseOffset order
21 Uint64 BaseOffset; // First page number covered; multiple of MMAP_PAGES_PER_BLOCK (NOTE(review): stored in pages, not bytes — see `pagenum - pagenum % MMAP_PAGES_PER_BLOCK` in VFS_MMap; the old "*PAGE_SIZE" comment was wrong)
22 tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK];	// Physical address of each cached page (0 = not yet populated)
// === PROTOTYPES ===
26 //void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
27 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
28 //int VFS_MUnmap(void *Addr, size_t Length);
/**
 * Map a file (or anonymous memory) into the current address space.
 * \param DestHint   Preferred virtual address (NULL = kernel chooses; not yet implemented, see TODO)
 * \param Length     Length of the mapping in bytes
 * \param Protection MMAP_PROT_* bits (WRITE and EXEC are acted on below)
 * \param Flags      MMAP_MAP_* bits (ANONYMOUS and PRIVATE are acted on below)
 * \param FD         File handle to map (ignored for anonymous mappings)
 * \param Offset     Byte offset into the file where the mapping starts
 * \return Virtual address of the mapping, or NULL on error (bad handle, OOM)
 */
31 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
35 tVFS_MMapPageBlock *pb, *prev;
37 ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
// Anonymous mappings carry no file offset; reuse the sub-page offset of the
// hint address so the page-rounding math below still works
39 if( Flags & MMAP_MAP_ANONYMOUS )
40 Offset = (tVAddr)DestHint & 0xFFF;
// Number of pages, rounded up to cover a partial first and last page
42 npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
43 pagenum = Offset / PAGE_SIZE;	// First file page touched
45 mapping_base = (tVAddr)DestHint;
46 tPage *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));	// Page-aligned destination
48 if( DestHint == NULL )
50 // TODO: Locate space for the allocation
55 // Handle anonymous mappings
56 if( Flags & MMAP_MAP_ANONYMOUS )
58 // TODO: Convert \a Protection into a flag set
59 void *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
// File-backed mapping: resolve the handle, bail on a bad descriptor
63 tVFS_Handle *h = VFS_GetHandle(FD);
64 if( !h || !h->Node ) LEAVE_RET('n', NULL);
// Lock the node while walking/mutating its per-node mmap page cache
68 Mutex_Acquire( &h->Node->Lock );
70 // Search for existing mapping for each page
71 // - Sorted list of 16 page blocks
// Advance to the first block whose window could contain pagenum
73 pb = h->Node->MMapInfo, prev = NULL;
74 pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK <= pagenum;
75 prev = pb, pb = pb->Next
79 LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
81 // - Allocate a block if needed
// No block covers pagenum (hit end of list, or next block starts past it)
82 if( !pb || pb->BaseOffset > pagenum )
// calloc zero-fills PhysAddrs, marking every page as "not yet cached"
85 pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
// Allocation failed: drop the lock before bailing (error path; surrounding lines elided)
87 Mutex_Release( &h->Node->Lock );
// Align the new block's window down to a 16-page boundary (BaseOffset is in pages)
91 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
// Insert at list head (branch for mid-list insertion presumably elided — TODO confirm)
95 h->Node->MMapInfo = pb;
98 // - Map (and allocate) pages
// Loop body follows; per-page iteration/increment lines are elided in this view
101 assert( pagenum >= pb->BaseOffset );
102 assert( pagenum - pb->BaseOffset < MMAP_PAGES_PER_BLOCK );
// Page not yet present in this address space
103 if( MM_GetPhysAddr( mapping_dest ) == 0 )
// ... and not in the node's shared cache either: populate it
105 if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
107 tVFS_NodeType *nt = h->Node->Type;
// Driver provides its own MMap hook — let it map the page directly
113 nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
117 // Allocate pages and read data
118 if( MM_Allocate(mapping_dest) == 0 ) {
// Out of memory: release the node lock (error path; surrounding lines elided)
120 Mutex_Release( &h->Node->Lock );
124 // TODO: Clip read length
125 read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE,
127 // TODO: This was commented out, why?
// Zero the tail of the page on a short read (e.g. EOF inside the page)
128 if( read_len != PAGE_SIZE ) {
129 memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
// Record the freshly-populated page in the shared cache, tag it with the
// owning node, and take a physical reference for the cache's copy
132 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
133 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
134 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
135 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
136 pb->PhysAddrs[pagenum - pb->BaseOffset]);
// Cache hit: map the already-read physical page and take a reference for this mapping
140 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
141 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
142 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
143 pb->PhysAddrs[pagenum - pb->BaseOffset]);
// Each mapped page keeps the node alive
145 h->Node->ReferenceCount ++;
// Apply protection: read-only unless MMAP_PROT_WRITE was requested
148 if( !(Protection & MMAP_PROT_WRITE) ) {
149 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
152 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
155 if( Protection & MMAP_PROT_EXEC ) {
156 MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
159 MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
// Page was already mapped: just reconcile the flags with the new request
164 LOG("Flag update on %p", mapping_dest);
165 if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
167 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
// Private mappings get copy-on-write so writes don't reach the shared cache page
170 if( Flags & MMAP_MAP_PRIVATE )
171 MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
175 // Roll on to next block if needed
176 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
// Next block already covers the next window — just advance (body elided)
178 if( pb->Next && pb->Next->BaseOffset == pagenum )
// Otherwise splice in a fresh block for the next 16-page window
// NOTE(review): malloc return is not checked here (unlike the calloc above) — verify
182 tVFS_MMapPageBlock *oldpb = pb;
183 pb = malloc( sizeof(tVFS_MMapPageBlock) );
184 pb->Next = oldpb->Next;
185 pb->BaseOffset = pagenum;
186 memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
192 Mutex_Release( &h->Node->Lock );
194 LEAVE('p', mapping_base);
195 return (void*)mapping_base;
/**
 * Back a virtual range with zeroed anonymous memory.
 * Handles a partial leading page, whole middle pages, and a partial trailing page.
 * \param Destination Start of the range (need not be page-aligned)
 * \param Length      Number of bytes to provide
 * \param FlagsSet    MM_SetFlags bits to set on an already-present leading page
 * \param FlagsMask   MM_SetFlags mask used with FlagsSet
 * \return Presumably the mapped address — return statement not visible in this view; TODO confirm
 */
198 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
200 size_t ofs = (tVAddr)Destination & (PAGE_SIZE-1);	// Offset within the first page
201 tPage *mapping_dest = (void*)( (char*)Destination - ofs );	// First page, aligned down
// Bytes of the request that land in the (possibly partial) first page
205 size_t bytes = MIN(PAGE_SIZE - ofs, Length);
207 // Allocate a partial page
208 if( MM_GetPhysAddr(mapping_dest) )
210 // Already allocated page, clear the area we're touching
211 ASSERT( ofs + bytes <= PAGE_SIZE );
213 // TODO: Double check that this area isn't already zero
214 memset( Destination, 0, bytes );
216 MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
218 LOG("#1: Clear %i from %p", Length, Destination);
// No page present yet: back the first page with fresh zeroed memory
222 MM_AllocateZero(mapping_dest);
223 LOG("#1: Allocate for %p", Destination);
// Whole pages in the middle of the range
// (loop-variable updates for Length/mapping_dest are elided in this view)
228 while( Length >= PAGE_SIZE )
230 if( MM_GetPhysAddr( mapping_dest ) )
232 // We're allocating entire pages here, so free this page and replace with a COW zero
233 MM_Deallocate(mapping_dest);
234 LOG("Replace %p with zero page", mapping_dest);
238 LOG("Allocate zero at %p", mapping_dest);
240 MM_AllocateZero(mapping_dest);
// Trailing partial page (if any bytes remain)
247 ASSERT(Length < PAGE_SIZE);
250 if( MM_GetPhysAddr(mapping_dest) )
252 // TODO: Don't touch page if already zero
// Page exists: just zero the leading Length bytes of it
253 memset( mapping_dest, 0, Length );
254 LOG("Clear %i in %p", Length, mapping_dest);
// Page absent: a fresh zero page covers the tail
258 MM_AllocateZero(mapping_dest);
259 LOG("Anon map to %p", mapping_dest);
266 int VFS_MUnmap(void *Addr, size_t Length)