Kernel/VFS - Truncate support, mmap fixes
[tpg/acess2.git] / KernelLand / Kernel / vfs / mmap.c
1 /*
2  * Acess2 Kernel VFS
3  * - By John Hodge (thePowersGang)
4  *
5  * mmap.c
6  * - VFS_MMap support
7  */
8 #define DEBUG   0
9 #include <acess.h>
10 #include <vfs.h>
11 #include <vfs_ext.h>
12 #include <vfs_int.h>
13
14 #define MMAP_PAGES_PER_BLOCK    16
15
16 // === STRUCTURES ===
typedef struct sVFS_MMapPageBlock	tVFS_MMapPageBlock;
// One node of a sorted singly-linked list caching the physical pages that
// back a node's mmapped file data, in runs of MMAP_PAGES_PER_BLOCK pages.
struct sVFS_MMapPageBlock
{
	tVFS_MMapPageBlock	*Next;	// Next block, in ascending BaseOffset order
	Uint64	BaseOffset;	// First *page index* covered by this block (page units, not bytes); always a multiple of MMAP_PAGES_PER_BLOCK
	tPAddr	PhysAddrs[MMAP_PAGES_PER_BLOCK];	// Cached physical addresses; 0 = page not yet populated
};
24
25 // === PROTOTYPES ===
26 //void  *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
27 void    *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
28 int     VFS_MMap_MapPage(tVFS_Node *Node, unsigned int PageNum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection);
29 //int   VFS_MUnmap(void *Addr, size_t Length);
30
31 // === CODE ===
32 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
33 {
34         ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
35
36         if( Flags & MMAP_MAP_ANONYMOUS )
37                 Offset = (tVAddr)DestHint & 0xFFF;
38         
39         unsigned int npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
40         unsigned int pagenum = Offset / PAGE_SIZE;
41
42         tVAddr mapping_base = (tVAddr)DestHint;
43
44         if( Flags & MMAP_MAP_FIXED )
45         {
46                 ASSERT( (Flags & MMAP_MAP_FIXED) && DestHint != NULL );
47                 // Keep and use the hint
48                 // - TODO: Validate that the region pointed to by the hint is correct
49         }
50         else
51         {
52                 Log_Warning("VFS", "MMap: TODO Handle non-fixed mappings");
53                 if( DestHint == NULL )
54                 {
55                         // TODO: Locate space for the allocation
56                         Log_Warning("VFS", "Mmap: Handle NULL destination hint");
57                         LEAVE('n');
58                         return NULL;
59                 }
60         }
61         tPage   *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
62
63         // Handle anonymous mappings
64         if( Flags & MMAP_MAP_ANONYMOUS )
65         {
66                 // TODO: Comvert \a Protection into a flag set
67                 void    *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
68                 LEAVE_RET('p', ret);
69         }
70
71         tVFS_Handle *h = VFS_GetHandle(FD);
72         if( !h || !h->Node )    LEAVE_RET('n', NULL);
73
74         LOG("h = %p", h);
75         
76         Mutex_Acquire( &h->Node->Lock );
77
78         tVFS_MMapPageBlock      *pb, **pb_pnp = (tVFS_MMapPageBlock**)&h->Node->MMapInfo;
79         // Search for existing mapping for each page
80         // - Sorted list of 16 page blocks
81         for( pb = h->Node->MMapInfo; pb; pb_pnp = &pb->Next, pb = pb->Next )
82         {
83                 if( pb->BaseOffset + MMAP_PAGES_PER_BLOCK <= pagenum )
84                         break;
85         }
86
87         LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
88
89         // - Allocate a block if needed
90         if( !pb || pb->BaseOffset > pagenum )
91         {
92                 void    *old_pb = pb;
93                 pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
94                 if(!pb) {
95                         Mutex_Release( &h->Node->Lock );
96                         LEAVE_RET('n', NULL);
97                 }
98                 pb->Next = old_pb;
99                 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
100                 *pb_pnp = pb;
101         }
102
103         // - Map (and allocate) pages
104         while( npages -- )
105         {
106                 ASSERTC( pagenum, >=, pb->BaseOffset );
107                 ASSERTC( pagenum - pb->BaseOffset, <, MMAP_PAGES_PER_BLOCK );
108                 if( MM_GetPhysAddr( mapping_dest ) == 0 )
109                 {
110                         LOG("Map page to %p", mapping_dest);
111                         if( VFS_MMap_MapPage(h->Node, pagenum, pb, mapping_dest, Protection) )
112                         {
113                                 Mutex_Release( &h->Node->Lock );
114                                 LEAVE('n');
115                                 return NULL;
116                         }
117                 }
118                 else
119                 {
120                         LOG("Flag update on %p", mapping_dest);
121                         if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
122                         {
123                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
124                         }
125                 }
126                 if( Flags & MMAP_MAP_PRIVATE ) {
127                         // TODO: Don't allow the page to change underneath either
128                         MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
129                 }
130                 pagenum ++;
131                 mapping_dest ++;
132
133                 // Roll on to next block if needed
134                 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
135                 {
136                         if( !pb->Next || pb->Next->BaseOffset != pagenum )
137                         {
138                                 if( pb->Next )  ASSERTC(pb->Next->BaseOffset % MMAP_PAGES_PER_BLOCK, ==, 0);
139                                 tVFS_MMapPageBlock      *newpb = malloc( sizeof(tVFS_MMapPageBlock) );
140                                 newpb->Next = pb->Next;
141                                 newpb->BaseOffset = pagenum;
142                                 memset(newpb->PhysAddrs, 0, sizeof(newpb->PhysAddrs));
143                                 pb->Next = newpb;
144                         }
145         
146                         pb = pb->Next;
147                 }
148         }
149         
150         Mutex_Release( &h->Node->Lock );
151
152         LEAVE('p', mapping_base);
153         return (void*)mapping_base;
154 }
155
156 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
157 {
158         size_t  ofs = (tVAddr)Destination & (PAGE_SIZE-1);
159         tPage   *mapping_dest = (void*)( (char*)Destination - ofs );
160         
161         if( ofs > 0 )
162         {
163                 size_t  bytes = MIN(PAGE_SIZE - ofs, Length);
164                 
165                 // Allocate a partial page
166                 if( MM_GetPhysAddr(mapping_dest) )
167                 {
168                         // Already allocated page, clear the area we're touching
169                         ASSERT( ofs + bytes <= PAGE_SIZE );
170                         
171                         // TODO: Double check that this area isn't already zero
172                         memset( Destination, 0, bytes );
173                         
174                         MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
175                         
176                         LOG("#1: Clear %i from %p", Length, Destination);
177                 }
178                 else
179                 {
180                         MM_AllocateZero(mapping_dest);
181                         LOG("#1: Allocate for %p", Destination);
182                 }
183                 mapping_dest ++;
184                 Length -= bytes;
185         }
186         while( Length >= PAGE_SIZE )
187         {
188                 if( MM_GetPhysAddr( mapping_dest ) )
189                 {
190                         // We're allocating entire pages here, so free this page and replace with a COW zero
191                         MM_Deallocate(mapping_dest);
192                         LOG("Replace %p with zero page", mapping_dest);
193                 }
194                 else
195                 {
196                         LOG("Allocate zero at %p", mapping_dest);
197                 }
198                 MM_AllocateZero(mapping_dest);
199                 
200                 mapping_dest ++;
201                 Length -= PAGE_SIZE;
202         }
203         if( Length > 0 )
204         {
205                 ASSERT(Length < PAGE_SIZE);
206                 
207                 // Tail page
208                 if( MM_GetPhysAddr(mapping_dest) )
209                 {
210                         // TODO: Don't touch page if already zero
211                         memset( mapping_dest, 0, Length );
212                         LOG("Clear %i in %p", Length, mapping_dest);
213                 }
214                 else
215                 {
216                         MM_AllocateZero(mapping_dest);
217                         LOG("Anon map to %p", mapping_dest);
218                 }
219         }
220         
221         return Destination;
222 }
223
224 int VFS_MMap_MapPage(tVFS_Node *Node, unsigned int pagenum, tVFS_MMapPageBlock *pb, void *mapping_dest, unsigned int Protection)
225 {
226         if( pb->PhysAddrs[pagenum - pb->BaseOffset] != 0 )
227         {
228                 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
229                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
230                 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
231                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
232         }
233         else
234         {
235                 tVFS_NodeType   *nt = Node->Type;
236                 if( !nt ) 
237                 {
238                         // TODO: error
239                 }
240                 else if( nt->MMap )
241                         nt->MMap(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
242                 else
243                 {
244                          int    read_len;
245                         // Allocate pages and read data
246                         if( MM_Allocate(mapping_dest) == 0 ) {
247                                 // TODO: Unwrap
248                                 return 1;
249                         }
250                         // TODO: Clip read length
251                         read_len = nt->Read(Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest, 0);
252                         // TODO: This was commented out, why?
253                         if( read_len != PAGE_SIZE ) {
254                                 memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
255                         }
256                 }
257                 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
258                 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], Node );
259                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
260                 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
261                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
262         }
263         // TODO: Huh?
264         Node->ReferenceCount ++;
265
266         // Set flags
267         if( !(Protection & MMAP_PROT_WRITE) ) {
268                 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
269         }
270         else {
271                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
272         }
273         
274         if( Protection & MMAP_PROT_EXEC ) {
275                 MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
276         }
277         else {
278                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
279         }
280         
281         return 0;
282 }
283
/**
 * \brief Unmap a previously mmapped region
 * \param Addr   Base address of the region
 * \param Length Length in bytes
 * \return 0 (always "success")
 *
 * TODO: Unimplemented stub - mappings are never torn down. A real
 * implementation should unmap pages in [Addr, Addr+Length), drop the
 * physical-page references and node refcounts taken in VFS_MMap_MapPage.
 */
int VFS_MUnmap(void *Addr, size_t Length)
{
	return 0;
}

UCC git Repository :: git.ucc.asn.au