[tpg/acess2.git] / Kernel / vfs / mmap.c
/*
 * Acess2 VFS
 * - Memory-mapped file support (VFS_MMap and VFS_MUnmap)
 */
#define DEBUG   1
#include <acess.h>
#include <vfs.h>
#include <vfs_ext.h>
#include <vfs_int.h>

#define MMAP_PAGES_PER_BLOCK    16

// === STRUCTURES ===
typedef struct sVFS_MMapPageBlock       tVFS_MMapPageBlock;
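/*
 * Per-node cache of file-backed pages. Each block records the physical
 * addresses of MMAP_PAGES_PER_BLOCK consecutive file pages; blocks are kept
 * in a singly-linked list (tVFS_Node.MMapInfo), sorted by BaseOffset, so
 * repeated mappings of the same file reuse the same physical pages.
 */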
struct sVFS_MMapPageBlock
{
        tVFS_MMapPageBlock      *Next;
        Uint64  BaseOffset;     // File page number of the first entry (a multiple of MMAP_PAGES_PER_BLOCK)
        tPAddr  PhysAddrs[MMAP_PAGES_PER_BLOCK];
};

// === CODE ===
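/*
 * VFS_MMap: Map a file (or anonymous memory) into the current address space.
 *  DestHint    - Address to map at (currently used as-is; free-space search is still a TODO)
 *  Length      - Length of the mapping in bytes
 *  Protection  - MMAP_PROT_* flags (only MMAP_PROT_WRITE is acted on here)
 *  Flags       - MMAP_MAP_* flags (MMAP_MAP_ANONYMOUS and MMAP_MAP_PRIVATE are handled)
 *  FD          - Handle of the file to map (ignored for anonymous mappings)
 *  Offset      - Byte offset into the file (need not be page aligned)
 * Returns the base address of the mapping, or NULL on error.
 */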
void *VFS_MMap(void *DestHint, size_t Length, int Protection, int Flags, int FD, Uint64 Offset)
{
        tVFS_Handle     *h;
        tVAddr  mapping_dest, mapping_base;
         int    npages, pagenum;
        tVFS_MMapPageBlock      *pb, *prev;

        ENTER("pDestHint iLength xProtection xFlags xFD XOffset", DestHint, Length, Protection, Flags, FD, Offset);

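        // Work out how many pages the mapping covers and which file page it
        // starts at. For example (assuming 4 KiB pages), Offset = 0x1800 and
        // Length = 0x1000 give npages = 2 (the range straddles a page
        // boundary) and pagenum = 1.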
        npages = ((Offset & (PAGE_SIZE-1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE;
        pagenum = Offset / PAGE_SIZE;

        mapping_base = (tVAddr)DestHint;
        mapping_dest = mapping_base & ~(PAGE_SIZE-1);

        // TODO: Locate space for the allocation

        // Handle anonymous mappings
        if( Flags & MMAP_MAP_ANONYMOUS )
        {
                for( ; npages --; mapping_dest += PAGE_SIZE )
                {
                        if( MM_GetPhysAddr(mapping_dest) ) {
                                // TODO: Set flags to COW if needed (well, if shared)
                        }
                        else {
                                if( !MM_Allocate(mapping_dest) ) {
                                        // TODO: Error
                                        Log_Warning("VFS", "VFS_MMap: Anon alloc to %p failed", mapping_dest);
                                }
                                LOG("Anon map to %p", mapping_dest);
                        }
                }
                LEAVE_RET('p', (void*)mapping_base);
        }

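        // File-backed mapping: look up the handle, then populate each page
        // either from the node's cached physical pages or by allocating a
        // fresh page and reading the file data into it.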
        h = VFS_GetHandle(FD);
        if( !h || !h->Node )    LEAVE_RET('n', NULL);

        LOG("h = %p", h);

        Mutex_Acquire( &h->Node->Lock );

        // Search for existing mapping for each page
        // - Sorted list of 16 page blocks
        for(
                pb = h->Node->MMapInfo, prev = NULL;
                pb && pb->BaseOffset + MMAP_PAGES_PER_BLOCK <= pagenum;
                prev = pb, pb = pb->Next
                );

        LOG("pb = %p, pb->BaseOffset = %X", pb, pb ? pb->BaseOffset : 0);

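        // pb is now either NULL (ran off the end of the list), the block that
        // contains pagenum, or the first block starting past pagenum; prev is
        // the block before it, if any.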
        // - Allocate a block if needed
        if( !pb || pb->BaseOffset > pagenum )
        {
                tVFS_MMapPageBlock      *old_pb = pb;
                pb = malloc( sizeof(tVFS_MMapPageBlock) );
                if(!pb) {
                        Mutex_Release( &h->Node->Lock );
                        LEAVE_RET('n', NULL);
                }
                pb->Next = old_pb;
                pb->BaseOffset = pagenum - pagenum % MMAP_PAGES_PER_BLOCK;
                memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
                if(prev)
                        prev->Next = pb;
                else
                        h->Node->MMapInfo = pb;
        }

        // - Map (and allocate) pages
        while( npages -- )
        {
                if( MM_GetPhysAddr(mapping_dest) == 0 )
                {
                        if( pb->PhysAddrs[pagenum - pb->BaseOffset] == 0 )
                        {
                                if( h->Node->MMap )
                                        h->Node->MMap(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
                                else
                                {
                                        // Allocate pages and read data
                                        if( MM_Allocate(mapping_dest) == 0 ) {
                                                // TODO: Unwrap
                                                Mutex_Release( &h->Node->Lock );
                                                LEAVE('n');
                                                return NULL;
                                        }
                                        h->Node->Read(h->Node, pagenum*PAGE_SIZE, PAGE_SIZE, (void*)mapping_dest);
                                }
                                pb->PhysAddrs[pagenum - pb->BaseOffset] = MM_GetPhysAddr( mapping_dest );
                                MM_SetPageNode( pb->PhysAddrs[pagenum - pb->BaseOffset], h->Node );
                                LOG("Read and map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
                                        pb->PhysAddrs[pagenum - pb->BaseOffset]);
                        }
                        else
                        {
                                MM_Map( mapping_dest, pb->PhysAddrs[pagenum - pb->BaseOffset] );
                                LOG("Cached map %X to %p (%P)", pagenum*PAGE_SIZE, mapping_dest,
                                        pb->PhysAddrs[pagenum - pb->BaseOffset]);
                        }
                        h->Node->ReferenceCount ++;

                        // Set flags
                        if( !(Protection & MMAP_PROT_WRITE) ) {
                                MM_SetFlags(mapping_dest, MM_PFLAG_RO, MM_PFLAG_RO);
                        }
                        else {
                                MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
                        }
                }
                else
                {
                        LOG("Flag update on %p", mapping_dest);
                        if( (MM_GetFlags(mapping_dest) & MM_PFLAG_RO) && (Protection & MMAP_PROT_WRITE) )
                        {
                                MM_SetFlags(mapping_dest, 0, MM_PFLAG_RO);
                        }
                }
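                // Private mappings get copy-on-write, so a write by this
                // process duplicates the page rather than dirtying the shared
                // cached copy.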
                if( Flags & MMAP_MAP_PRIVATE )
                        MM_SetFlags(mapping_dest, MM_PFLAG_COW, MM_PFLAG_COW);
                pagenum ++;
                mapping_dest += PAGE_SIZE;

                // Roll on to next block if needed
                if(pagenum - pb->BaseOffset == MMAP_PAGES_PER_BLOCK)
                {
                        if( pb->Next && pb->Next->BaseOffset == pagenum )
                                pb = pb->Next;
                        else
                        {
                                tVFS_MMapPageBlock      *oldpb = pb;
                                pb = malloc( sizeof(tVFS_MMapPageBlock) );
                                if(!pb) {
                                        Mutex_Release( &h->Node->Lock );
                                        LEAVE_RET('n', NULL);
                                }
                                pb->Next = oldpb->Next;
                                pb->BaseOffset = pagenum;
                                memset(pb->PhysAddrs, 0, sizeof(pb->PhysAddrs));
                                oldpb->Next = pb;
                        }
                }
        }

        Mutex_Release( &h->Node->Lock );

        LEAVE('p', (void*)mapping_base);
        return (void*)mapping_base;
}

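/*
 * VFS_MUnmap: Remove a mapping created by VFS_MMap.
 * Currently a stub - it does not yet unmap the pages or drop the node's
 * cached physical pages, and always reports success.
 */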
int VFS_MUnmap(void *Addr, size_t Length)
{
        return 0;
}
