Kernel/vfs - (minor) commenting only
[tpg/acess2.git] / KernelLand / Kernel / vfs / mmap.c
1 /*
2  * Acess2 Kernel VFS
3  * - By John Hodge (thePowersGang)
4  *
5  * mmap.c
6  * - VFS_MMap support
7  */
8 #define DEBUG   0
9 #include <acess.h>
10 #include <vfs.h>
11 #include <vfs_ext.h>
12 #include <vfs_int.h>
13
14 #define MMAP_PAGES_PER_BLOCK    16
15
16 // === STRUCTURES ===
// One node of a per-node sorted list caching the physical pages backing a file mapping
typedef struct sVFS_MMapPageBlock	tVFS_MMapPageBlock;
struct sVFS_MMapPageBlock
{
	tVFS_MMapPageBlock	*Next;	// Next block, sorted by ascending BaseOffset
	Uint64	BaseOffset;	// Page index (file offset / PAGE_SIZE) of PhysAddrs[0]; always a multiple of MMAP_PAGES_PER_BLOCK
	tPAddr	PhysAddrs[MMAP_PAGES_PER_BLOCK];	// Physical address per page; 0 = page not yet populated
};
24
25 // === PROTOTYPES ===
26 //void  *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset);
27 void    *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask);
28 //int   VFS_MUnmap(void *Addr, size_t Length);
29
30 // === CODE ===
31 void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
32 {
33         tVAddr  mapping_base;
34          int    npages, pagenum;
35         tVFS_MMapPageBlock      *pb, *prev;
36
37         ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);
38
39         if( Flags & MMAP_MAP_ANONYMOUS )
40                 Offset = (tVAddr)DestHint & 0xFFF;
41         
42         npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
43         pagenum = Offset / PAGE_SIZE;
44
45         mapping_base = (tVAddr)DestHint;
46         tPage   *mapping_dest = (void*)(mapping_base & ~(PAGE_SIZE-1));
47
48         if( DestHint == NULL )
49         {
50                 // TODO: Locate space for the allocation
51                 LEAVE('n');
52                 return NULL;
53         }
54
55         // Handle anonymous mappings
56         if( Flags & MMAP_MAP_ANONYMOUS )
57         {
58                 // TODO: Comvert \a Protection into a flag set
59                 void    *ret = VFS_MMap_Anon((void*)mapping_base, Length, 0, 0);
60                 LEAVE_RET('p', ret);
61         }
62
63         tVFS_Handle *h = VFS_GetHandle(FD);
64         if( !h || !h->Node )    LEAVE_RET('n', NULL);
65
66         LOG("h = %p", h);
67         
68         Mutex_Acquire( &h->Node->Lock );
69
70         // Search for existing mapping for each page
71         // - Sorted list of 16 page blocks
72         for(
73                 pb = h->Node->MMapInfo, prev = NULL;
74                 pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK < pagenum;
75                 prev = pb, pb = pb->Next
76                 )
77                 ;
78
79         LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);
80
81         // - Allocate a block if needed
82         if( !pb || pb->BaseOffset > pagenum )
83         {
84                 void    *old_pb = pb;
85                 pb = calloc( 1, sizeof(tVFS_MMapPageBlock) );
86                 if(!pb) {
87                         Mutex_Release( &h->Node->Lock );
88                         LEAVE_RET('n', NULL);
89                 }
90                 pb->Next = old_pb;
91                 pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
92                 if(prev)
93                         prev->Next = pb;
94                 else
95                         h->Node->MMapInfo = pb;
96         }
97
98         // - Map (and allocate) pages
99         while( npages -- )
100         {
101                 if( MM_GetPhysAddr( mapping_dest ) == 0 )
102                 {
103                         if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
104                         {
105                                 tVFS_NodeType   *nt = h->Node->Type;
106                                 if( !nt ) 
107                                 {
108                                         // TODO: error
109                                 }
110                                 else if( nt->MMap )
111                                         nt->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, mapping_dest);
112                                 else
113                                 {
114                                          int    read_len;
115                                         // Allocate pages and read data
116                                         if( MM_Allocate(mapping_dest) == 0 ) {
117                                                 // TODO: Unwrap
118                                                 Mutex_Release( &h->Node->Lock );
119                                                 LEAVE('n');
120                                                 return NULL;
121                                         }
122                                         // TODO: Clip read length
123                                         read_len = nt->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE,
124                                                 mapping_dest, 0);
125                                         // TODO: This was commented out, why?
126                                         if( read_len != PAGE_SIZE ) {
127                                                 memset( (char*)mapping_dest + read_len, 0, PAGE_SIZE-read_len );
128                                         }
129                                 }
130                                 pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
131                                 MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
132                                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
133                                 LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
134                                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
135                         }
136                         else
137                         {
138                                 MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
139                                 MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
140                                 LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
141                                         pb->PhysAddrs[pagenum - pb->BaseOffset]);
142                         }
143                         h->Node->ReferenceCount ++;
144                 
145                         // Set flags
146                         if( !(Protection & MMAP_PROT_WRITE) ) {
147                                 MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
148                         }
149                         else {
150                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
151                         }
152                         
153                         if( Protection & MMAP_PROT_EXEC ) {
154                                 MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
155                         }
156                         else {
157                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
158                         }
159                 }
160                 else
161                 {
162                         LOG("Flag update on %p", mapping_dest);
163                         if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
164                         {
165                                 MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
166                         }
167                 }
168                 if( Flags & MMAP_MAP_PRIVATE )
169                         MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
170                 pagenum ++;
171                 mapping_dest ++;
172
173                 // Roll on to next block if needed
174                 if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
175                 {
176                         if( pb->Next && pb->Next->BaseOffset == pagenum )
177                                 pb = pb->Next;
178                         else
179                         {
180                                 tVFS_MMapPageBlock      *oldpb = pb;
181                                 pb = malloc( sizeof(tVFS_MMapPageBlock) );
182                                 pb->Next = oldpb->Next;
183                                 pb->BaseOffset = pagenum;
184                                 memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
185                                 oldpb->Next = pb;
186                         }
187                 }
188         }
189         
190         Mutex_Release( &h->Node->Lock );
191
192         LEAVE('p', mapping_base);
193         return (void*)mapping_base;
194 }
195
196 void *VFS_MMap_Anon(void *Destination, size_t Length, Uint FlagsSet, Uint FlagsMask)
197 {
198         size_t  ofs = (tVAddr)Destination & (PAGE_SIZE-1);
199         tPage   *mapping_dest = (void*)( (char*)Destination - ofs );
200         
201         if( ofs > 0 )
202         {
203                 size_t  bytes = MIN(PAGE_SIZE - ofs, Length);
204                 
205                 // Allocate a partial page
206                 if( MM_GetPhysAddr(mapping_dest) )
207                 {
208                         // Already allocated page, clear the area we're touching
209                         ASSERT( ofs + bytes <= PAGE_SIZE );
210                         
211                         // TODO: Double check that this area isn't already zero
212                         memset( Destination, 0, bytes );
213                         
214                         MM_SetFlags(mapping_dest, FlagsSet, FlagsMask);
215                         
216                         LOG("#1: Clear %i from %p", Length, Destination);
217                 }
218                 else
219                 {
220                         MM_AllocateZero(mapping_dest);
221                         LOG("#1: Allocate for %p", Destination);
222                 }
223                 mapping_dest ++;
224                 Length -= bytes;
225         }
226         while( Length >= PAGE_SIZE )
227         {
228                 if( MM_GetPhysAddr( mapping_dest ) )
229                 {
230                         // We're allocating entire pages here, so free this page and replace with a COW zero
231                         MM_Deallocate(mapping_dest);
232                         LOG("Replace %p with zero page", mapping_dest);
233                 }
234                 else
235                 {
236                         LOG("Allocate zero at %p", mapping_dest);
237                 }
238                 MM_AllocateZero(mapping_dest);
239                 
240                 mapping_dest ++;
241                 Length -= PAGE_SIZE;
242         }
243         if( Length > 0 )
244         {
245                 ASSERT(Length < PAGE_SIZE);
246                 
247                 // Tail page
248                 if( MM_GetPhysAddr(mapping_dest) )
249                 {
250                         // TODO: Don't touch page if already zero
251                         memset( mapping_dest, 0, Length );
252                         LOG("Clear %i in %p", Length, mapping_dest);
253                 }
254                 else
255                 {
256                         MM_AllocateZero(mapping_dest);
257                         LOG("Anon map to %p", mapping_dest);
258                 }
259         }
260         
261         return Destination;
262 }
263
/**
 * \brief Unmap a memory region
 * \param Addr	Base of the region (unused - not yet implemented)
 * \param Length	Length of the region in bytes (unused)
 * \return Always 0 (stub: reports success without doing any work)
 */
int VFS_MUnmap(void *Addr, size_t Length)
{
	// TODO: Actually release the mappings and drop page references
	(void)Addr;
	(void)Length;
	return 0;
}

UCC git Repository :: git.ucc.asn.au