b6d5c2d91921425bb8864cb04dd797e91776526a
[tpg/acess2.git] / KernelLand / Kernel / vfs / mmap.c
1 /*
2  * Acess2 Kernel VFS
3  * - By John Hodge (thePowersGang)
4  *
5  * mmap.c
6  * - VFS_MMap support
7  */
8 #define DEBUG   0
9 #include <acess.h>
10 #include <vfs.h>
11 #include <vfs_ext.h>
12 #include <vfs_int.h>
13
14 #define MMAP_PAGES_PER_BLOCK    16
15
16 // === STRUCTURES ===
// Per-node cache of mmap backing pages, kept as a sorted singly-linked list
// hanging off tVFS_Node.MMapInfo (see VFS_MMap).  Each block covers
// MMAP_PAGES_PER_BLOCK consecutive pages of the file.
typedef struct sVFS_MMapPageBlock	tVFS_MMapPageBlock;
struct sVFS_MMapPageBlock
{
	tVFS_MMapPageBlock	*Next;	// Next block in ascending BaseOffset order, or NULL
	Uint64	BaseOffset;	// First file page index covered by this block; always a multiple of MMAP_PAGES_PER_BLOCK (units are pages, not bytes)
	tPAddr	PhysAddrs[MMAP_PAGES_PER_BLOCK];	// Physical address per page slot; 0 = not yet loaded
};
24
25 // === PROTOTYPES ===
26 //void  *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
27 void    *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
28 //int   VFS_MUnmap(void *Addr, size_t Length);
29
30 // === CODE ===
31 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
32 {
33         tVAddr  mapping_base;
34          int    npages, pagenum;
35         tVFS_MMapPageBlock      *pb, *prev;
36
37         ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
38
39         if( Flags & MMAP_MAP_ANONYMOUS )
40                 Offset = (tVAddr)DestHint & 0xFFF;
41         
42         npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
43         pagenum = Offset / PAGE_SIZE;
44
45         mapping_base = (tVAddr)DestHint;
46         tPage   *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
47
48         if( DestHint == NULL )
49         {
50                 // TODO: Locate space for the allocation
51                 LEAVE('n');
52                 return NULL;
53         }
54
55         // Handle anonymous mappings
56         if( Flags & MMAP_MAP_ANONYMOUS )
57         {
58                 // TODO: Comvert \a Protection into a flag set
59                 void    *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
60                 LEAVE_RET('p', ret);
61         }
62
63         tVFS_Handle *h = VFS_GetHandle(FD);
64         if( !h || !h->Node )    LEAVE_RET('n', NULL);
65
66         LOG("h = %p", h);
67         
68         Mutex_Acquire( &h->Node->Lock );
69
70         // Search for existing mapping for each page
71         // - Sorted list of 16 page blocks
72         for(
73                 pb = h->Node->MMapInfo, prev = NULL;
74                 pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK <= pagenum;
75                 prev = pb, pb = pb->Next
76                 )
77                 ;
78
79         LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
80
81         // - Allocate a block if needed
82         if( !pb || pb->BaseOffset > pagenum )
83         {
84                 void    *old_pb = pb;
85                 pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
86                 if(!pb) {
87                         Mutex_Release( &h->Node->Lock );
88                         LEAVE_RET('n', NULL);
89                 }
90                 pb->Next = old_pb;
91                 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
92                 if(prev)
93                         prev->Next = pb;
94                 else
95                         h->Node->MMapInfo = pb;
96         }
97
98         // - Map (and allocate) pages
99         while( npages -- )
100         {
101                 assert( pagenum >= pb->BaseOffset );
102                 assert( pagenum - pb->BaseOffset < MMAP_PAGES_PER_BLOCK );
103                 if( MM_GetPhysAddr( mapping_dest ) == 0 )
104                 {
105                         if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
106                         {
107                                 tVFS_NodeType   *nt = h->Node->Type;
108                                 if( !nt ) 
109                                 {
110                                         // TODO: error
111                                 }
112                                 else if( nt->MMap )
113                                         nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
114                                 else
115                                 {
116                                          int    read_len;
117                                         // Allocate pages and read data
118                                         if( MM_Allocate(mapping_dest) == 0 ) {
119                                                 // TODO: Unwrap
120                                                 Mutex_Release( &h->Node->Lock );
121                                                 LEAVE('n');
122                                                 return NULL;
123                                         }
124                                         // TODO: Clip read length
125                                         read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE,
126                                                 mapping_dest, 0);
127                                         // TODO: This was commented out, why?
128                                         if( read_len != PAGE_SIZE ) {
129                                                 memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
130                                         }
131                                 }
132                                 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
133                                 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
134                                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
135                                 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
136                                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
137                         }
138                         else
139                         {
140                                 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
141                                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
142                                 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
143                                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
144                         }
145                         h->Node->ReferenceCount ++;
146                 
147                         // Set flags
148                         if( !(Protection & MMAP_PROT_WRITE) ) {
149                                 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
150                         }
151                         else {
152                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
153                         }
154                         
155                         if( Protection & MMAP_PROT_EXEC ) {
156                                 MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
157                         }
158                         else {
159                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
160                         }
161                 }
162                 else
163                 {
164                         LOG("Flag update on %p", mapping_dest);
165                         if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
166                         {
167                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
168                         }
169                 }
170                 if( Flags & MMAP_MAP_PRIVATE )
171                         MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
172                 pagenum ++;
173                 mapping_dest ++;
174
175                 // Roll on to next block if needed
176                 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
177                 {
178                         if( pb->Next && pb->Next->BaseOffset == pagenum )
179                                 pb = pb->Next;
180                         else
181                         {
182                                 tVFS_MMapPageBlock      *oldpb = pb;
183                                 pb = malloc( sizeof(tVFS_MMapPageBlock) );
184                                 pb->Next = oldpb->Next;
185                                 pb->BaseOffset = pagenum;
186                                 memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
187                                 oldpb->Next = pb;
188                         }
189                 }
190         }
191         
192         Mutex_Release( &h->Node->Lock );
193
194         LEAVE('p', mapping_base);
195         return (void*)mapping_base;
196 }
197
198 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
199 {
200         size_t  ofs = (tVAddr)Destination & (PAGE_SIZE-1);
201         tPage   *mapping_dest = (void*)( (char*)Destination - ofs );
202         
203         if( ofs > 0 )
204         {
205                 size_t  bytes = MIN(PAGE_SIZE - ofs, Length);
206                 
207                 // Allocate a partial page
208                 if( MM_GetPhysAddr(mapping_dest) )
209                 {
210                         // Already allocated page, clear the area we're touching
211                         ASSERT( ofs + bytes <= PAGE_SIZE );
212                         
213                         // TODO: Double check that this area isn't already zero
214                         memset( Destination, 0, bytes );
215                         
216                         MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
217                         
218                         LOG("#1: Clear %i from %p", Length, Destination);
219                 }
220                 else
221                 {
222                         MM_AllocateZero(mapping_dest);
223                         LOG("#1: Allocate for %p", Destination);
224                 }
225                 mapping_dest ++;
226                 Length -= bytes;
227         }
228         while( Length >= PAGE_SIZE )
229         {
230                 if( MM_GetPhysAddr( mapping_dest ) )
231                 {
232                         // We're allocating entire pages here, so free this page and replace with a COW zero
233                         MM_Deallocate(mapping_dest);
234                         LOG("Replace %p with zero page", mapping_dest);
235                 }
236                 else
237                 {
238                         LOG("Allocate zero at %p", mapping_dest);
239                 }
240                 MM_AllocateZero(mapping_dest);
241                 
242                 mapping_dest ++;
243                 Length -= PAGE_SIZE;
244         }
245         if( Length > 0 )
246         {
247                 ASSERT(Length < PAGE_SIZE);
248                 
249                 // Tail page
250                 if( MM_GetPhysAddr(mapping_dest) )
251                 {
252                         // TODO: Don't touch page if already zero
253                         memset( mapping_dest, 0, Length );
254                         LOG("Clear %i in %p", Length, mapping_dest);
255                 }
256                 else
257                 {
258                         MM_AllocateZero(mapping_dest);
259                         LOG("Anon map to %p", mapping_dest);
260                 }
261         }
262         
263         return Destination;
264 }
265
/**
 * \brief Unmap a memory-mapped region
 * \param Addr   Base address of the region (currently unused)
 * \param Length Length of the region in bytes (currently unused)
 * \return Always 0 - unmapping is not yet implemented, this is a stub
 */
int VFS_MUnmap(void *Addr, size_t Length)
{
	// TODO: Actually release the pages backing [Addr, Addr+Length)
	(void)Addr;
	(void)Length;
	return 0;
}

UCC git Repository :: git.ucc.asn.au