/*
 * Acess2 Kernel VFS
 * - By John Hodge (thePowersGang)
 *
 * mmap.c
 * - VFS_MMap support
 */
#define DEBUG   0
#include <acess.h>
#include <vfs.h>
#include <vfs_ext.h>
#include <vfs_int.h>

#define MMAP_PAGES_PER_BLOCK    16

// === STRUCTURES ===
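// A node's MMapInfo field holds a sorted singly-linked list of these blocks.
// Each block caches the physical addresses of the pages backing one aligned
// MMAP_PAGES_PER_BLOCK-page window of the file, so repeated mappings of the
// same region can reuse already-loaded pages.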
typedef struct sVFS_MMapPageBlock       tVFS_MMapPageBlock;
struct sVFS_MMapPageBlock
{
        tVFS_MMapPageBlock      *Next;
        Uint64  BaseOffset;     // Page number of the first page in this block (always a multiple of MMAP_PAGES_PER_BLOCK)
        tPAddr  PhysAddrs[MMAP_PAGES_PER_BLOCK];
};

// === CODE ===
void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
{
        tVFS_Handle     *h;
        tVAddr  mapping_dest, mapping_base;
         int    npages, pagenum;
        tVFS_MMapPageBlock      *pb, *prev;

        ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);

        if( Flags & MMAP_MAP_ANONYMOUS )
                Offset = (tVAddr)DestHint & 0xFFF;

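        // Number of whole pages needed to cover [Offset, Offset+Length) and the
        // index of the file page that the mapping starts in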
        npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
        pagenum = Offset / PAGE_SIZE;

        mapping_base = (tVAddr)DestHint;
        mapping_dest = mapping_base & ~(PAGE_SIZE-1);

        // TODO: Locate space for the allocation

        // Handle anonymous mappings
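        // - Pages already mapped at the destination are marked copy-on-write
        //   and the requested region within them is cleared; pages not yet
        //   mapped are allocated and zero-filled.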
        if( Flags & MMAP_MAP_ANONYMOUS )
        {
                size_t  ofs = 0;
                LOG("%i pages anonymous to %p", npages, mapping_dest);
                for( ; npages --; mapping_dest += PAGE_SIZE, ofs += PAGE_SIZE )
                {
                        if( MM_GetPhysAddr(mapping_dest) ) {
                                // TODO: Set flags to COW if needed (well, if shared)
                                MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
                                LOG("clear from %p, %i bytes", (void*)(mapping_base + ofs),
                                        PAGE_SIZE - (mapping_base & (PAGE_SIZE-1))
                                        );
                                memset( (void*)(mapping_base + ofs), 0, PAGE_SIZE - (mapping_base & (PAGE_SIZE-1)));
                        }
                        else {
                                LOG("New empty page");
                                // TODO: Map a COW zero page instead
                                if( !MM_Allocate(mapping_dest) ) {
                                        // TODO: Free any pages already allocated for this mapping
                                        Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest);
                                        LEAVE_RET('n', NULL);
                                }
                                memset((void*)mapping_dest, 0, PAGE_SIZE);
                                LOG("Anon map to %p", mapping_dest);
                        }
                }
                LEAVE_RET('p', (void*)mapping_base);
        }

        h = VFS_GetHandle(FD);
        if( !h || !h->Node )    LEAVE_RET('n', NULL);

        LOG("h = %p", h);

        Mutex_Acquire( &h->Node->Lock );

        // Search for existing mapping for each page
        // - Sorted list of 16 page blocks
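        // - Walk forward to the first block that could contain `pagenum`,
        //   keeping `prev` so a new block can be inserted in order if needed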
        for(
                pb = h->Node->MMapInfo, prev = NULL;
                pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK <= pagenum;
                prev = pb, pb = pb->Next
                );

        LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);

        // - Allocate a block if needed
        if( !pb || pb->BaseOffset > pagenum )
        {
                void    *old_pb = pb;
                pb = malloc( sizeof(tVFS_MMapPageBlock) );
                if(!pb) {
                        Mutex_Release( &h->Node->Lock );
                        LEAVE_RET('n', NULL);
                }
                pb->Next = old_pb;
                pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
                memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
                if(prev)
                        prev->Next = pb;
                else
                        h->Node->MMapInfo = pb;
        }

        // - Map (and allocate) pages
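        // For each page in the mapping:
        //  - If the address is not yet mapped: reuse the cached physical page
        //    from the block if one exists, otherwise ask the node's MMap handler
        //    to map it, or allocate a fresh page and read the file data into it.
        //  - If it is already mapped: just update the protection flags.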
        while( npages -- )
        {
                if( MM_GetPhysAddr(mapping_dest) == 0 )
                {
                        if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
                        {
                                if( h->Node->MMap )
                                        h->Node->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
                                else
                                {
                                         int    read_len;
                                        // Allocate pages and read data
                                        if( MM_Allocate(mapping_dest) == 0 ) {
                                                // TODO: Unwrap
                                                Mutex_Release( &h->Node->Lock );
                                                LEAVE('n');
                                                return NULL;
                                        }
                                        // TODO: Clip read length
                                        read_len = h->Node->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
//                                      if( read_len != PAGE_SIZE ) {
//                                              memset( (void*)(mapping_dest+read_len), 0, PAGE_SIZE-read_len );
//                                      }
                                }
                                pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
                                MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
                                MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
                                LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
                                        pb->PhysAddrs[pagenum - pb->BaseOffset]);
                        }
                        else
                        {
                                MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
                                MM_RefPhys( pb->PhysAddrs[pagenum - pb->BaseOffset] );
                                LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
                                        pb->PhysAddrs[pagenum - pb->BaseOffset]);
                        }
                        h->Node->ReferenceCount ++;

                        // Set flags
                        if( !(Protection & MMAP_PROT_WRITE) ) {
                                MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
                        }
                        else {
                                MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
                        }

                        if( Protection & MMAP_PROT_EXEC ) {
                                MM_SetFlags(mapping_dest, MM_PFLAG_EXEC, MM_PFLAG_EXEC);
                        }
                        else {
                                MM_SetFlags(mapping_dest, 0, MM_PFLAG_EXEC);
                        }
                }
                else
                {
                        LOG("Flag update on %p", mapping_dest);
                        if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
                        {
                                MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
                        }
                }
                if( Flags & MMAP_MAP_PRIVATE )
                        MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
                pagenum ++;
                mapping_dest += PAGE_SIZE;

                // Roll on to next block if needed
                if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
                {
                        if( pb->Next && pb->Next->BaseOffset == pagenum )
                                pb = pb->Next;
                        else
                        {
                                tVFS_MMapPageBlock      *oldpb = pb;
                                pb = malloc( sizeof(tVFS_MMapPageBlock) );
                                if(!pb) {
                                        Mutex_Release( &h->Node->Lock );
                                        LEAVE_RET('n', NULL);
                                }
                                pb->Next = oldpb->Next;
                                pb->BaseOffset = pagenum;
                                memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
                                oldpb->Next = pb;
                        }
                }
        }

        Mutex_Release( &h->Node->Lock );

        LEAVE('p', mapping_base);
        return (void*)mapping_base;
}

int VFS_MUnmap(void *Addr, size_t Length)
{
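        // TODO: Unreference and unmap the pages - currently an unimplemented
        //       stub that just reports success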
        return 0;
}
