3 * - By John Hodge (thePowersGang)
// Number of page slots cached per mapping block (fixed-size chunks of the
// per-node sorted list used by VFS_MMap below).
14 #define MMAP_PAGES_PER_BLOCK 16
17 typedef struct sVFS_MMapPageBlock tVFS_MMapPageBlock;
// One node of the singly-linked, sorted list of cached physical pages for a
// mapped VFS node. Each block covers MMAP_PAGES_PER_BLOCK consecutive file
// pages starting at BaseOffset; a zero entry in PhysAddrs means "not yet
// read/mapped". NOTE(review): listing is an excerpt — the closing brace and
// any further members are not visible here.
18 struct sVFS_MMapPageBlock
20 	tVFS_MMapPageBlock *Next;	// next block, ascending BaseOffset
21 	Uint64 BaseOffset; // Must be a multiple of MMAP_PAGES_PER_BLOCK*PAGE_SIZE
22 	tPAddr PhysAddrs[MMAP_PAGES_PER_BLOCK];	// cached physical pages (0 = unpopulated)
// Map a file (or anonymous memory) into the current address space.
// NOTE(review): this listing is an excerpt with interior lines elided; the
// comments below describe only the code that is visible here.
//  DestHint   - suggested virtual address; its in-page offset is honoured
//  Length     - length of the mapping in bytes
//  Protection - MMAP_PROT_* bits (WRITE and EXEC are acted on below)
//  Flags      - MMAP_MAP_* bits (ANONYMOUS and PRIVATE are handled here)
//  FD         - VFS file descriptor to map from (unused for anonymous maps)
//  Offset     - byte offset into the file
// Returns the base virtual address of the mapping, or NULL on failure.
26 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
29 	tVAddr mapping_dest, mapping_base;
31 	tVFS_MMapPageBlock *pb, *prev;
33 	ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
	// Anonymous mappings carry no file offset; reuse Offset to hold the
	// hint's offset within its page so the page-count maths below still works.
35 	if( Flags & MMAP_MAP_ANONYMOUS )
36 		Offset = (tVAddr)DestHint & 0xFFF;
	// Pages needed = partial head (Offset within first page) + Length,
	// rounded up to whole pages
38 	npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
39 	pagenum = Offset / PAGE_SIZE;	// index of the first file page
41 	mapping_base = (tVAddr)DestHint;
42 	mapping_dest = mapping_base & ~(PAGE_SIZE-1);	// page-align the destination
44 	// TODO: Locate space for the allocation
46 	// Handle anonymous mappings
47 	if( Flags & MMAP_MAP_ANONYMOUS )
50 		LOG("%i pages anonymous to %p", npages, mapping_dest);
51 		for( ; npages --; mapping_dest += PAGE_SIZE, ofs += PAGE_SIZE )
			// Page already present: keep it, mark copy-on-write, and just
			// zero the requested region within it
53 			if( MM_GetPhysAddr((void*)mapping_dest) ) {
54 				// TODO: Set flags to COW if needed (well, if shared)
55 				MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
56 				LOG("clear from %p, %i bytes", (void*)(mapping_base + ofs),
57 					PAGE_SIZE - (mapping_base & (PAGE_SIZE-1))
59 				memset( (void*)(mapping_base + ofs), 0, PAGE_SIZE - (mapping_base & (PAGE_SIZE-1)));
				// (elided branch) Page not present: allocate a fresh zeroed page
63 				LOG("New empty page");
64 				// TODO: Map a COW zero page instead
65 				if( !MM_Allocate(mapping_dest) ) {
67 					Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest);
69 				memset((void*)mapping_dest, 0, PAGE_SIZE);
70 				LOG("Anon map to %p", mapping_dest);
73 		LEAVE_RET('p', (void*)mapping_base);
	// File-backed mapping: resolve and validate the handle
76 	h = VFS_GetHandle(FD);
77 	if( !h || !h->Node ) LEAVE_RET('n', NULL);
	// Serialise against other mappers/readers of this node
81 	Mutex_Acquire( &h->Node->Lock );
83 	// Search for existing mapping for each page
84 	// - Sorted list of 16 page blocks
	// Walk the sorted block list until we reach the block that could
	// contain pagenum (or run off the end)
86 	pb = h->Node->MMapInfo, prev = NULL;
87 	pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK < pagenum;
88 	prev = pb, pb = pb->Next
92 	LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
94 	// - Allocate a block if needed
95 	if( !pb || pb->BaseOffset > pagenum )
		// calloc zero-fills PhysAddrs, so all slots start "unpopulated"
98 		pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
		// (elided) allocation-failure path: drop the lock and bail out
100 		Mutex_Release( &h->Node->Lock );
101 		LEAVE_RET('n', NULL);
		// Align the new block's base down to a block boundary
104 		pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
		// (elided) list insertion; this arm makes the new block the list head
108 		h->Node->MMapInfo = pb;
111 	// - Map (and allocate) pages
	// Only touch pages not already mapped in this address space
114 	if( MM_GetPhysAddr( (void*)mapping_dest ) == 0 )
		// No cached physical page for this file page yet
116 		if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
118 			tVFS_NodeType *nt = h->Node->Type;
			// Driver provides its own MMap hook — let it map the page directly
124 			nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
128 			// Allocate pages and read data
129 			if( MM_Allocate(mapping_dest) == 0 ) {
				// (elided error path) allocation failed: unlock and bail
131 				Mutex_Release( &h->Node->Lock );
135 			// TODO: Clip read length
136 			read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE,
137 				(void*)mapping_dest, 0);
138 			// TODO: This was commented out, why?
			// Zero the tail of a short read so stale heap data never leaks
139 			if( read_len != PAGE_SIZE ) {
140 				memset( (void*)(mapping_dest+read_len), 0, PAGE_SIZE-read_len );
			// Cache the freshly populated physical page against the node
143 			pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( (void*)mapping_dest );
144 			MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
145 			MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
146 			LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
147 				pb->PhysAddrs[pagenum - pb->BaseOffset]);
			// (elided branch) Cached page exists: map it and take a reference
151 			MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
152 			MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
153 			LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
154 				pb->PhysAddrs[pagenum - pb->BaseOffset]);
		// The mapping holds a reference on the node
156 		h->Node->ReferenceCount ++;
		// Apply requested protection to the newly mapped page
159 		if( !(Protection & MMAP_PROT_WRITE) ) {
160 			MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
163 			MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
166 		if( Protection & MMAP_PROT_EXEC ) {
167 			MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
170 			MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
		// (elided branch) Page was already mapped: only adjust its flags
175 		LOG("Flag update on %p", mapping_dest);
176 		if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
178 			MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
	// Private mappings are copy-on-write so writes never reach the file cache
181 	if( Flags & MMAP_MAP_PRIVATE )
182 		MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
184 	mapping_dest += PAGE_SIZE;
186 	// Roll on to next block if needed
187 	if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
		// Next block already covers the next page range — advance (elided)
189 		if( pb->Next && pb->Next->BaseOffset == pagenum )
			// Otherwise splice in a fresh block after the current one
193 			tVFS_MMapPageBlock *oldpb = pb;
194 			pb = malloc( sizeof(tVFS_MMapPageBlock) );
195 			pb->Next = oldpb->Next;
196 			pb->BaseOffset = pagenum;
197 			memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
203 	Mutex_Release( &h->Node->Lock );
205 	LEAVE('p', mapping_base);
206 	return (void*)mapping_base;
209 int VFS_MUnmap(void *Addr, size_t Length)