Kernel/IOCache - Fixed IOCache eating all temp mappings
[tpg/acess2.git] / KernelLand / Kernel / drv / iocache.c
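This revision pairs every temporary mapping of a cached page with a release, so the cache no longer exhausts the kernel's temporary-mapping slots. A minimal sketch of the pattern now used throughout the file (MM_MapTemp/MM_FreeTemp as they appear below; `page`, `offset` and `Buffer` stand for the surrounding locals):

        char *tmp = MM_MapTemp( page->BasePhys );           // borrow a temporary kernel mapping
        memcpy( Buffer, tmp + offset, Cache->SectorSize );  // copy while mapped
        MM_FreeTemp( tmp );                                 // release it before returning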
/*
 * Acess2 Kernel
 * - By thePowersGang (John Hodge)
 * 
 * drv/iocache.c
 * - Block IO Caching
 */
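/*
 * Usage sketch (illustrative only): a block driver creates a cache with a
 * write-back callback and consults it before touching the hardware. The names
 * `disk` and `Disk_WriteSector` are hypothetical driver-side code, not part of
 * this module; the callback is invoked as Write(ID, SectorNumber, Data), as
 * seen in IOCache_Flush below.
 *
 *      tIOCache *cache = IOCache_Create(Disk_WriteSector, disk, 512, 64);
 *      IOCache_Write(cache, sector, buf);      // update the cached copy (and device, per Mode)
 *      IOCache_Flush(cache);                   // push any dirty sectors to the device
 *      IOCache_Destroy(cache);
 */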
#define DEBUG   0
#include <acess.h>
#include <iocache.h>
#define IOCACHE_USE_PAGES       1

// === TYPES ===
typedef struct sIOCache_Ent     tIOCache_Ent;
typedef struct sIOCache_PageInfo        tIOCache_PageInfo;

// === STRUCTURES ===
struct sIOCache_Ent
{
        tIOCache_Ent    *Next;
        Uint64  Num;
        Sint64  LastAccess;
        Sint64  LastWrite;
        Uint8   Data[];
};

struct sIOCache_PageInfo
{
        tIOCache_PageInfo       *CacheNext;
        tIOCache_PageInfo       *GlobalNext;
        tIOCache        *Owner;
        Sint64  LastAccess;
        
        tPAddr  BasePhys;
        Uint64  BaseOffset;
        Uint32  PresentSectors;
        Uint32  DirtySectors;
};
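/*
 * Layout note (descriptive): each sIOCache_PageInfo caches one physical page
 * of sectors. Assuming a 4 KiB PAGE_SIZE and SectorSize = 512, a page holds
 * eight sectors; bit n of PresentSectors/DirtySectors refers to the sector at
 * byte offset n*SectorSize within the page, i.e. device sector
 * BaseOffset/SectorSize + n.
 */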

struct sIOCache
{
        tIOCache        *Next;
         int    SectorSize;
        tMutex  Lock;
         int    Mode;
        void    *ID;
        tIOCache_WriteCallback  Write;
         int    CacheSize;
         int    CacheUsed;
        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *Pages;
        #else
        tIOCache_Ent    *Entries;
        #endif
};

#if IOCACHE_USE_PAGES
tIOCache_PageInfo       *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset);
#endif

// === GLOBALS ===
tShortSpinlock  glIOCache_Caches;
tIOCache        *gIOCache_Caches = NULL;
 int    giIOCache_NumCaches = 0;
#if IOCACHE_USE_PAGES
tIOCache_PageInfo       *gIOCache_GlobalPages;
#endif

// === CODE ===
/**
 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
 * \brief Creates a new IO Cache
 */
tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
{
        if( CacheSize < 1 )
                return NULL;
        if( SectorSize < 512 )
                return NULL;
        if( SectorSize > PAGE_SIZE )
                return NULL;
        // Sector size must be a power of two, so sectors pack evenly into pages
        if( SectorSize & (SectorSize - 1) )
                return NULL;

        tIOCache        *ret = calloc( 1, sizeof(tIOCache) );
        if(!ret)        return NULL;

        // Fill Structure
        ret->SectorSize = SectorSize;
        ret->Mode = IOCACHE_WRITEBACK;
        ret->ID = ID;
        ret->Write = Write;
        ret->CacheSize = CacheSize;
        
        // Append to list
        SHORTLOCK( &glIOCache_Caches );
        ret->Next = gIOCache_Caches;
        gIOCache_Caches = ret;
        giIOCache_NumCaches ++;
        SHORTREL( &glIOCache_Caches );
        
        // Return
        return ret;
}

#if IOCACHE_USE_PAGES
tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset)
{
        Uint64  wanted_base = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
        if( Offset )
                *Offset = (Sector*Cache->SectorSize) % PAGE_SIZE;
        
        // `prev` starts at a pseudo-node; this relies on CacheNext being the
        // first member of tIOCache_PageInfo
        tIOCache_PageInfo *prev = (void*)&Cache->Pages;
        for( tIOCache_PageInfo *page = Cache->Pages; page; prev = page, page = page->CacheNext )
        {
                if(page->BaseOffset < wanted_base)      continue;
                if(page->BaseOffset > wanted_base)      break;
                return page;
        }
        if( Prev )
                *Prev = prev;
        return NULL;
}
#endif
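/*
 * Worked example (assuming PAGE_SIZE = 4096): SectorSize = 512 and Sector = 9
 * give a byte position of 9*512 = 4608, so wanted_base = 4096, *Offset = 512,
 * and the caller tests bit 512/512 = 1 of the page's sector masks.
 */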

/**
 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
 * \brief Read from a cached sector
 * \return 1 if the sector was copied from the cache, 0 on a miss, -1 on error
 */
int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
{
        ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
        
        // Sanity Check!
        if(!Cache || !Buffer) {
                LEAVE('i', -1);
                return -1;
        }
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                LEAVE('i', -1);
                return -1;
        }

        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *page;
        size_t  offset;
        page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
        if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
        {
                page->LastAccess = now();
                char *tmp = MM_MapTemp( page->BasePhys );
                memcpy( Buffer, tmp + offset, Cache->SectorSize );
                MM_FreeTemp( tmp );
                Mutex_Release( &Cache->Lock );
                LEAVE('i', 1);
                return 1;
        }
        #else
        // Search the list
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                // Have we found what we are looking for?
                if( ent->Num == Sector ) {
                        memcpy(Buffer, ent->Data, Cache->SectorSize);
                        ent->LastAccess = now();
                        Mutex_Release( &Cache->Lock );
                        LEAVE('i', 1);
                        return 1;
                }
                // It's a sorted list, so as soon as we go past `Sector` we know
                // it's not there
                if(ent->Num > Sector)   break;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
        LEAVE('i', 0);
        return 0;
}
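/*
 * Typical caller pattern (illustrative sketch; ReadFromDevice stands in for a
 * driver's own hardware read routine and is not defined here):
 *
 *      if( IOCache_Read(cache, sector, buf) != 1 ) {
 *              ReadFromDevice(disk, sector, buf);      // cache miss: hit the hardware
 *              IOCache_Add(cache, sector, buf);        // then populate the cache
 *      }
 */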

/**
 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
 * \brief Cache a sector
 * \return 1 if the sector was newly cached, 0 if it was already cached, -1 on error
 */
int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
{
        // Sanity Check!
        if(!Cache || !Buffer)
                return -1;
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        
        // Search the list
        #if IOCACHE_USE_PAGES
        char    *page_map;
        size_t  offset;
        tIOCache_PageInfo *prev;
        tIOCache_PageInfo *page = IOCache_int_GetPage(Cache, Sector, &prev, &offset);
        if( page )
        {
                Uint32  mask = (1 << offset/Cache->SectorSize);
                 int    ret = !(page->PresentSectors & mask);
                if( ret )
                {
                        page->LastAccess = now();
                        page_map = MM_MapTemp( page->BasePhys );
                        memcpy( page_map + offset, Buffer, Cache->SectorSize );
                        MM_FreeTemp( page_map );
                        page->PresentSectors |= mask;
                }
                Mutex_Release( &Cache->Lock );
                return ret;
        }
        else if( Cache->CacheUsed < Cache->CacheSize )
        {
                // Room left - allocate a fresh page (CacheUsed counts pages here)
                page = malloc( sizeof(tIOCache_PageInfo) );
                if( !page ) {
                        Mutex_Release( &Cache->Lock );
                        return -1;
                }
                page->BasePhys = MM_AllocPhys();
                page_map = MM_MapTemp( page->BasePhys );
                Cache->CacheUsed ++;
                
                page->GlobalNext = gIOCache_GlobalPages;
                gIOCache_GlobalPages = page;
        }
        else
        {
                // Cache is full - evict and reuse the least recently accessed page
                tIOCache_PageInfo *oldest = Cache->Pages, *oldestPrev = NULL;
                for( tIOCache_PageInfo *ent = Cache->Pages, *ep = NULL; ent; ep = ent, ent = ent->CacheNext )
                {
                        if( ent->LastAccess < oldest->LastAccess ) {
                                oldest = ent;
                                oldestPrev = ep;
                        }
                }
                // Remove oldest from this cache's list
                *(oldestPrev ? &oldestPrev->CacheNext : &Cache->Pages) = oldest->CacheNext;
                // If the insertion point was the page just removed, step back to its predecessor
                if( prev == oldest )
                        prev = oldestPrev ? oldestPrev : (void*)&Cache->Pages;
                page = oldest;
                page_map = MM_MapTemp( page->BasePhys );
                // Flush any dirty sectors before the page is reused
                if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
                {
                        for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
                        {
                                if( !(page->DirtySectors & (1 << i)) )
                                        continue;
                                Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
                                        page_map + i * Cache->SectorSize);
                        }
                }
        }

        // Insert the (new or recycled) page into the sorted per-cache list
        page->CacheNext = prev->CacheNext;
        prev->CacheNext = page;
        
        page->Owner = Cache;
        page->LastAccess = now();
        
        page->BaseOffset = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
        page->PresentSectors = (1 << offset/Cache->SectorSize);
        page->DirtySectors = 0;
        
        memcpy( page_map + offset, Buffer, Cache->SectorSize );

        MM_FreeTemp(page_map);
        
        #else
        tIOCache_Ent    *ent, *prev;
        tIOCache_Ent    *new;
        tIOCache_Ent    *oldest = NULL, *oldestPrev = NULL;
        prev = (tIOCache_Ent*)&Cache->Entries;
        for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
        {
                // Is it already here?
                if( ent->Num == Sector ) {
                        Mutex_Release( &Cache->Lock );
                        return 0;
                }
                
                // Check if we have found the oldest entry
                if( !oldest || oldest->LastAccess > ent->LastAccess ) {
                        oldest = ent;
                        oldestPrev = prev;
                }
                
                // Sorted list: once we pass `Sector`, this is the insertion point
                if(ent->Num > Sector)
                        break;
        }
        
        // Create the new entry
        new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
        if( !new ) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        new->Next = ent;
        new->Num = Sector;
        new->LastAccess = now();
        new->LastWrite = 0;     // Zero is special, it means unmodified
        memcpy(new->Data, Buffer, Cache->SectorSize);
        
        // Have we reached the maximum cached entries?
        if( Cache->CacheUsed == Cache->CacheSize )
        {
                tIOCache_Ent    *savedPrev = prev;
                // If so, search the rest of the list for the least recently accessed entry
                for( ; ent; prev = ent, ent = ent->Next )
                {
                        // Check if we have found the oldest entry
                        if( !oldest || oldest->LastAccess > ent->LastAccess ) {
                                oldest = ent;
                                oldestPrev = prev;
                        }
                }
                if( !oldest ) {
                        Log_Error("IOCache", "Cache full, but also empty");
                        free(new);
                        Mutex_Release( &Cache->Lock );
                        return -1;
                }
                // Remove from list, write back and free
                oldestPrev->Next = oldest->Next;
                if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
                        Cache->Write(Cache->ID, oldest->Num, oldest->Data);
                // Keep the pending insertion valid if it referenced the evicted entry
                if( new->Next == oldest )
                        new->Next = oldest->Next;
                if( savedPrev == oldest )
                        savedPrev = oldestPrev;
                free(oldest);
                
                // Decrement the used count
                Cache->CacheUsed --;
                
                // Restore `prev`
                prev = savedPrev;
        }
        
        // Append to list
        prev->Next = new;
        Cache->CacheUsed ++;
        #endif
        
        // Release the lock
        Mutex_Release( &Cache->Lock );
        
        // Return success
        return 1;
}

/**
 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
 * \brief Write to a cached sector
 * \return 1 if the cached copy was updated, 0 if the sector is not cached, -1 on error
 */
int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
{
        // Sanity Check!
        if(!Cache || !Buffer)
                return -1;
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        
        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *page;
        size_t  offset;
        page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
        if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
        {
                page->LastAccess = now();
                char *tmp = MM_MapTemp( page->BasePhys );
                memcpy( tmp + offset, Buffer, Cache->SectorSize );
                MM_FreeTemp( tmp );
                
                if(Cache->Mode == IOCACHE_WRITEBACK) {
                        Cache->Write(Cache->ID, Sector, Buffer);
                }
                else {
                        page->DirtySectors |= (1 << offset/Cache->SectorSize);
                }
                
                Mutex_Release( &Cache->Lock );
                return 1;
        }
        #else
        // Search the list
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                // Have we found what we are looking for?
                if( ent->Num == Sector ) {
                        memcpy(ent->Data, Buffer, Cache->SectorSize);
                        ent->LastAccess = ent->LastWrite = now();
                        
                        if(Cache->Mode == IOCACHE_WRITEBACK) {
                                Cache->Write(Cache->ID, Sector, Buffer);
                                ent->LastWrite = 0;
                        }
                        
                        Mutex_Release( &Cache->Lock );
                        return 1;
                }
                // It's a sorted list, so as soon as we go past `Sector` we know
                // it's not there
                if(ent->Num > Sector)   break;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
        return 0;
}

/**
 * \fn void IOCache_Flush( tIOCache *Cache )
 * \brief Flush a cache
 */
void IOCache_Flush( tIOCache *Cache )
{
        if( Cache->Mode == IOCACHE_VIRTUAL )    return;
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return;
        }
        
        // Write All
        #if IOCACHE_USE_PAGES
        for( tIOCache_PageInfo *page = Cache->Pages; page; page = page->CacheNext )
        {
                if( !page->DirtySectors )
                        continue;
                // Write back only the sectors that have been modified
                char *page_map = MM_MapTemp( page->BasePhys );
                for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
                {
                        if( !(page->DirtySectors & (1 << i)) )
                                continue;
                        Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
                                page_map + i * Cache->SectorSize);
                }
                page->DirtySectors = 0;
                MM_FreeTemp(page_map);
        }
        #else
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                Cache->Write(Cache->ID, ent->Num, ent->Data);
                ent->LastWrite = 0;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
}

/**
 * \fn void IOCache_Destroy( tIOCache *Cache )
 * \brief Destroy a cache
 */
void IOCache_Destroy( tIOCache *Cache )
{
        IOCache_Flush(Cache);
        
        // Remove from list
        SHORTLOCK( &glIOCache_Caches );
        {
                tIOCache        *cache;
                tIOCache        *prev_cache = (tIOCache*)&gIOCache_Caches;
                for(cache = gIOCache_Caches;
                        cache;
                        prev_cache = cache, cache = cache->Next )
                {
                        if(cache == Cache) {
                                prev_cache->Next = cache->Next;
                                giIOCache_NumCaches --;
                                break;
                        }
                }
        }
        SHORTREL( &glIOCache_Caches );
        
        free(Cache);
}
