Kernel - MMap fixed (non-fixed mappings, bad search), SHM mmap only working
[tpg/acess2.git] / KernelLand / Kernel / vfs / mmap.c
1 /*
2  * Acess2 Kernel VFS
3  * - By John Hodge (thePowersGang)
4  *
5  * mmap.c
6  * - VFS_MMap support
7  */
8 #define DEBUG   0
9 #include <acess.h>
10 #include <vfs.h>
11 #include <vfs_ext.h>
12 #include <vfs_int.h>
13 #include <mm_virt.h>    // MM_USER_MAX
14
15 #define MMAP_PAGES_PER_BLOCK    16
16
17 // === STRUCTURES ===
18 typedef struct sVFS_MMapPageBlock       tVFS_MMapPageBlock;
19 struct sVFS_MMapPageBlock
20 {
21         tVFS_MMapPageBlock      *Next;
22         Uint64  BaseOffset;     // Must be a multiple of MMAP_PAGES_PER_BLOCK*PAGE_SIZE
23         tPAddr  PhysAddrs[MMAP_PAGES_PER_BLOCK];
24 };
25
26 // === PROTOTYPES ===
27 //void  *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
28 void    *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
29 int     VFS_MMap_MapPage(tVFS_Node *Node, unsigned int PageNum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection);
30 //int   VFS_MUnmap(void *Addr, size_t Length);
31 bool    _range_free(const tPage *Base, Uint NumPages);
32
33 // === CODE ===
34 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
35 {
36         ENTER("pDestHint xLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
37
38         if( Flags & MMAP_MAP_ANONYMOUS )
39                 Offset = (tVAddr)DestHint & 0xFFF;
40         
41         unsigned int npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
42         unsigned int pagenum = Offset / PAGE_SIZE;
43         LOG("npages=%u,pagenum=%u", npages, pagenum);
44
45         tVAddr mapping_base = (tVAddr)DestHint;
46
47         if( Flags & MMAP_MAP_FIXED )
48         {
49                 ASSERT( (Flags & MMAP_MAP_FIXED) && DestHint != NULL );
50                 // Keep and use the hint
51                 // - TODO: Validate that the region pointed to by the hint is correct
52         }
53         else
54         {
55                 Log_Warning("VFS", "MMap: TODO Handle non-fixed mappings");
56                 
57                 // Locate a free location in the address space (between brk and MM_USER_MAX)
58                 // TODO: Prefer first location after DestHint, but can go below
59                 
60                 // Search downwards from the top of user memory
61                 mapping_base = 0;
62                 for( tPage *dst = (tPage*)MM_USER_MAX - npages; dst > (tPage*)PAGE_SIZE; dst -- )
63                 {
64                         if( _range_free(dst, npages) ) {
65                                 mapping_base = (tVAddr)dst;
66                                 break;
67                         }
68                 }
69                 if( mapping_base == 0 )
70                 {
71                         Log_Warning("VFS", "MMap: Out of address space");
72                         errno = ENOMEM;
73                         LEAVE('n');
74                         return NULL;
75                 }
76         }
77         tPage   *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
78
79         if( !_range_free(mapping_dest, npages) )
80         {
81                 LOG("Specified range is not free");
82                 //errno = EINVAL;
83                 //LEAVE('n');
84                 //return NULL;
85                 Log_Warning("VFS", "MMap: Overwriting/replacing maps at %p+%x", mapping_base, Length);
86         }
87
88         // Handle anonymous mappings
89         if( Flags & MMAP_MAP_ANONYMOUS )
90         {
91                 // TODO: Comvert \a Protection into a flag set
92                 void    *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
93                 LEAVE_RET('p', ret);
94         }
95
96         tVFS_Handle *h = VFS_GetHandle(FD);
97         if( !h || !h->Node )    LEAVE_RET('n', NULL);
98
99         LOG("h = %p", h);
100         
101         Mutex_Acquire( &h->Node->Lock );
102
103         tVFS_MMapPageBlock      *pb, **pb_pnp = (tVFS_MMapPageBlock**)&h->Node->MMapInfo;
104         // Search for existing mapping for each page
105         // - Sorted list of 16 page blocks
106         for( pb = h->Node->MMapInfo; pb; pb_pnp = &pb->Next, pb = pb->Next )
107         {
108                 if( pb->BaseOffset + MMAP_PAGES_PER_BLOCK > pagenum )
109                         break;
110         }
111
112         LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
113
114         // - Allocate a block if needed
115         if( !pb || pb->BaseOffset > pagenum )
116         {
117                 void    *old_pb = pb;
118                 pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
119                 if(!pb) {
120                         Mutex_Release( &h->Node->Lock );
121                         LEAVE_RET('n', NULL);
122                 }
123                 pb->Next = old_pb;
124                 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
125                 *pb_pnp = pb;
126         }
127
128         // - Map (and allocate) pages
129         while( npages -- )
130         {
131                 ASSERTC( pagenum, >=, pb->BaseOffset );
132                 ASSERTC( pagenum - pb->BaseOffset, <, MMAP_PAGES_PER_BLOCK );
133                 if( MM_GetPhysAddr( mapping_dest ) == 0 )
134                 {
135                         LOG("Map page to %p", mapping_dest);
136                         if( VFS_MMap_MapPage(h->Node, pagenum, pb, mapping_dest, Protection) )
137                         {
138                                 Mutex_Release( &h->Node->Lock );
139                                 LEAVE('n');
140                                 return NULL;
141                         }
142                 }
143                 else
144                 {
145                         LOG("Flag update on %p", mapping_dest);
146                         if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
147                         {
148                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
149                         }
150                 }
151                 if( Flags & MMAP_MAP_PRIVATE ) {
152                         // TODO: Don't allow the page to change underneath either
153                         MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
154                 }
155                 pagenum ++;
156                 mapping_dest ++;
157
158                 // Roll on to next block if needed
159                 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
160                 {
161                         if( !pb->Next || pb->Next->BaseOffset != pagenum )
162                         {
163                                 if( pb->Next )  ASSERTC(pb->Next->BaseOffset % MMAP_PAGES_PER_BLOCK, ==, 0);
164                                 tVFS_MMapPageBlock      *newpb = malloc( sizeof(tVFS_MMapPageBlock) );
165                                 newpb->Next = pb->Next;
166                                 newpb->BaseOffset = pagenum;
167                                 memset(newpb->PhysAddrs, 0, sizeof(newpb->PhysAddrs));
168                                 pb->Next = newpb;
169                         }
170         
171                         pb = pb->Next;
172                 }
173         }
174         
175         Mutex_Release( &h->Node->Lock );
176
177         LEAVE('p', mapping_base);
178         return (void*)mapping_base;
179 }
180
181 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
182 {
183         size_t  ofs = (tVAddr)Destination & (PAGE_SIZE-1);
184         tPage   *mapping_dest = (void*)( (char*)Destination - ofs );
185         
186         if( ofs > 0 )
187         {
188                 size_t  bytes = MIN(PAGE_SIZE - ofs, Length);
189                 
190                 // Allocate a partial page
191                 if( MM_GetPhysAddr(mapping_dest) )
192                 {
193                         // Already allocated page, clear the area we're touching
194                         ASSERT( ofs + bytes <= PAGE_SIZE );
195                         
196                         // TODO: Double check that this area isn't already zero
197                         memset( Destination, 0, bytes );
198                         
199                         MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
200                         
201                         LOG("#1: Clear %i from %p", Length, Destination);
202                 }
203                 else
204                 {
205                         MM_AllocateZero(mapping_dest);
206                         LOG("#1: Allocate for %p", Destination);
207                 }
208                 mapping_dest ++;
209                 Length -= bytes;
210         }
211         while( Length >= PAGE_SIZE )
212         {
213                 if( MM_GetPhysAddr( mapping_dest ) )
214                 {
215                         // We're allocating entire pages here, so free this page and replace with a COW zero
216                         MM_Deallocate(mapping_dest);
217                         LOG("Replace %p with zero page", mapping_dest);
218                 }
219                 else
220                 {
221                         LOG("Allocate zero at %p", mapping_dest);
222                 }
223                 MM_AllocateZero(mapping_dest);
224                 
225                 mapping_dest ++;
226                 Length -= PAGE_SIZE;
227         }
228         if( Length > 0 )
229         {
230                 ASSERT(Length < PAGE_SIZE);
231                 
232                 // Tail page
233                 if( MM_GetPhysAddr(mapping_dest) )
234                 {
235                         // TODO: Don't touch page if already zero
236                         memset( mapping_dest, 0, Length );
237                         LOG("Clear %i in %p", Length, mapping_dest);
238                 }
239                 else
240                 {
241                         MM_AllocateZero(mapping_dest);
242                         LOG("Anon map to %p", mapping_dest);
243                 }
244         }
245         
246         return Destination;
247 }
248
249 int VFS_MMap_MapPage(tVFS_Node *Node, unsigned int pagenum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection)
250 {
251         if( pb->PhysAddrs[pagenum - pb->BaseOffset] != 0 )
252         {
253                 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
254                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
255                 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
256                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
257         }
258         else
259         {
260                 tVFS_NodeType   *nt = Node->Type;
261                 if( !nt ) 
262                 {
263                         // TODO: error
264                 }
265                 else if( nt->MMap )
266                         nt->MMap(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
267                 else
268                 {
269                          int    read_len;
270                         // Allocate pages and read data
271                         if( MM_Allocate(mapping_dest) == 0 ) {
272                                 // TODO: Unwrap
273                                 return 1;
274                         }
275                         // TODO: Clip read length
276                         read_len = nt->Read(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest, 0);
277                         // TODO: This was commented out, why?
278                         if( read_len != PAGE_SIZE ) {
279                                 memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
280                         }
281                 }
282                 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
283                 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], Node );
284                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
285                 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
286                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
287         }
288         // TODO: Huh?
289         Node->ReferenceCount ++;
290
291         // Set flags
292         if( !(Protection & MMAP_PROT_WRITE) ) {
293                 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
294         }
295         else {
296                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
297         }
298         
299         if( Protection & MMAP_PROT_EXEC ) {
300                 MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
301         }
302         else {
303                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
304         }
305         
306         return 0;
307 }
308
// Unmap a region previously mapped with VFS_MMap
// TODO: Not yet implemented - logs via UNIMPLEMENTED() and reports success,
// so mappings (and the node references taken in VFS_MMap_MapPage) are leaked
int VFS_MUnmap(void *Addr, size_t Length)
{
	UNIMPLEMENTED();
	return 0;
}
314
315 bool _range_free(const tPage *Base, Uint NumPages)
316 {
317         for( int i = 0; i < NumPages; i ++ )
318         {
319                 if( MM_GetPhysAddr(Base + i) )
320                 {
321                         // Oh.
322                         return false;
323                 }
324         }
325         return true;
326 }

UCC git Repository :: git.ucc.asn.au