Kernel/x86 - Removed busy wait in MM_MapTemp
[tpg/acess2.git] / KernelLand / Kernel / drv / iocache.c
/*
 * Acess2 Kernel
 * - IO Cache
 * 
 * By thePowersGang (John Hodge)
 * 
 * TODO: Convert to use spare physical pages instead
 */
#define DEBUG   0
#include <acess.h>
#include <iocache.h>
#define IOCACHE_USE_PAGES       1

// === TYPES ===
typedef struct sIOCache_Ent     tIOCache_Ent;
typedef struct sIOCache_PageInfo        tIOCache_PageInfo;

// === STRUCTURES ===
struct sIOCache_Ent
{
        tIOCache_Ent    *Next;
        Uint64  Num;
        Sint64  LastAccess;
        Sint64  LastWrite;
        Uint8   Data[];
};

struct sIOCache_PageInfo
{
        tIOCache_PageInfo       *CacheNext;
        tIOCache_PageInfo       *GlobalNext;
        tIOCache        *Owner;
        Sint64  LastAccess;
        
        tPAddr  BasePhys;
        Uint64  BaseOffset;
        Uint32  PresentSectors;
        Uint32  DirtySectors;
};

struct sIOCache
{
        tIOCache        *Next;
         int    SectorSize;
        tMutex  Lock;
         int    Mode;
        void    *ID;
        tIOCache_WriteCallback  Write;
         int    CacheSize;
         int    CacheUsed;
        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *Pages;
        #else
        tIOCache_Ent    *Entries;
        #endif
};

#if IOCACHE_USE_PAGES
tIOCache_PageInfo       *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset);
#endif

// === GLOBALS ===
tShortSpinlock  glIOCache_Caches;
tIOCache        *gIOCache_Caches = NULL;
 int    giIOCache_NumCaches = 0;
#if IOCACHE_USE_PAGES
tIOCache_PageInfo       *gIOCache_GlobalPages;
#endif

// === CODE ===
/**
 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
 * \brief Creates a new IO Cache
 */
tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
{
        if( CacheSize < 1 )
                return NULL;
        if( SectorSize < 512 )
                return NULL;
        if( SectorSize > PAGE_SIZE )
                return NULL;
        
        // TODO: Check that SectorSize is a power of two
        
        tIOCache        *ret = calloc( 1, sizeof(tIOCache) );
        if(!ret)        return NULL;
        
        // Fill Structure
        ret->SectorSize = SectorSize;
        ret->Mode = IOCACHE_WRITEBACK;
        ret->ID = ID;
        ret->Write = Write;
        ret->CacheSize = CacheSize;
        
        // Append to list
        SHORTLOCK( &glIOCache_Caches );
        ret->Next = gIOCache_Caches;
        gIOCache_Caches = ret;
        SHORTREL( &glIOCache_Caches );
        
        // Return
        return ret;
}
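
/*
 * Example usage (illustrative sketch only - MyDisk_WriteSector and
 * gMyDisk_Info are hypothetical driver names, not part of this kernel):
 * a block driver creates one cache per device at initialisation time,
 * passing its write-back callback and an opaque per-device handle.
 *
 *      tIOCache *cache = IOCache_Create( MyDisk_WriteSector, &gMyDisk_Info, 512, 64 );
 *      if( !cache )
 *              Log_Warning("MyDisk", "Unable to create sector cache");
 */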

#if IOCACHE_USE_PAGES
tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset)
{
        Uint64  wanted_base = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
        if( Offset )
                *Offset = (Sector*Cache->SectorSize) % PAGE_SIZE;
        
        tIOCache_PageInfo *prev = (void*)&Cache->Pages;
        for( tIOCache_PageInfo *page = Cache->Pages; page; prev = page, page = page->CacheNext )
        {
                if(page->BaseOffset < wanted_base)      continue;
                if(page->BaseOffset > wanted_base)      break;
                return page;
        }
        if( Prev )
                *Prev = prev;
        return NULL;
}
#endif
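
/*
 * Worked example of the mapping above (assuming the usual 4 KiB PAGE_SIZE and
 * a 512-byte SectorSize): Sector 11 sits at byte 11*512 = 5632, so
 * wanted_base = 4096 and *Offset = 1536, i.e. it is sector 1536/512 = 3 within
 * its page and is tracked by bit (1 << 3) of PresentSectors/DirtySectors.
 * With those sizes a page holds 8 sectors, so only the low 8 bits of the
 * 32-bit masks are used.
 */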

/**
 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
 * \brief Read from a cached sector
 */
int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
{
        ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
        
        // Sanity Check!
        if(!Cache || !Buffer) {
                LEAVE('i', -1);
                return -1;
        }
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                LEAVE('i', -1);
                return -1;
        }
        
        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *page;
        size_t  offset;
        page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
        if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
        {
                page->LastAccess = now();
                char *tmp = MM_MapTemp( page->BasePhys );
                memcpy( Buffer, tmp + offset, Cache->SectorSize );
                MM_FreeTemp( tmp );
                Mutex_Release( &Cache->Lock );
                LEAVE('i', 1);
                return 1;
        }
        #else
        // Search the list
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                // Have we found what we are looking for?
                if( ent->Num == Sector ) {
                        memcpy(Buffer, ent->Data, Cache->SectorSize);
                        ent->LastAccess = now();
                        Mutex_Release( &Cache->Lock );
                        LEAVE('i', 1);
                        return 1;
                }
                // It's a sorted list, so as soon as we go past `Sector` we know
                // it's not there
                if(ent->Num > Sector)   break;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
        LEAVE('i', 0);
        return 0;
}
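
/*
 * Typical caller pattern (sketch; MyDisk_ReadSector is a hypothetical driver
 * routine): IOCache_Read returns 1 on a cache hit, 0 on a miss and -1 on
 * error, so a miss is normally followed by a real device read plus an
 * IOCache_Add.
 *
 *      if( IOCache_Read( cache, sector, buf ) != 1 )
 *      {
 *              MyDisk_ReadSector( &gMyDisk_Info, sector, buf );
 *              IOCache_Add( cache, sector, buf );
 *      }
 */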

/**
 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
 * \brief Cache a sector
 */
int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
{
        // Sanity Check!
        if(!Cache || !Buffer)
                return -1;
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        
        // Search the list
        #if IOCACHE_USE_PAGES
        char    *page_map;
        size_t  offset;
        tIOCache_PageInfo *prev;
        tIOCache_PageInfo *page = IOCache_int_GetPage(Cache, Sector, &prev, &offset);
        if( page )
        {
                // Page already cached - fill in this sector if it isn't present yet
                Uint32  mask = (1 << offset/Cache->SectorSize);
                 int    ret = !(page->PresentSectors & mask);
                if( ret )
                {
                        page_map = MM_MapTemp( page->BasePhys );
                        memcpy( page_map + offset, Buffer, Cache->SectorSize );
                        MM_FreeTemp( page_map );
                        page->PresentSectors |= mask;
                }
                Mutex_Release( &Cache->Lock );
                return ret;
        }
        else if( Cache->CacheUsed < Cache->CacheSize )
        {
                // Room still available - allocate a fresh page
                page = malloc( sizeof(tIOCache_PageInfo) );
                if( !page ) {
                        Mutex_Release( &Cache->Lock );
                        return -1;
                }
                page->BasePhys = MM_AllocPhys();
                page_map = MM_MapTemp( page->BasePhys );
                Cache->CacheUsed ++;
                
                page->GlobalNext = gIOCache_GlobalPages;
                gIOCache_GlobalPages = page;
        }
        else
        {
                // Cache full - evict and reuse the least recently accessed page
                tIOCache_PageInfo *oldest = Cache->Pages, *oldestPrev = NULL;
                for( tIOCache_PageInfo *ent = Cache->Pages, *entPrev = NULL; ent; entPrev = ent, ent = ent->CacheNext )
                {
                        if( ent->LastAccess < oldest->LastAccess ) {
                                oldest = ent;
                                oldestPrev = entPrev;
                        }
                }
                // Remove oldest from list
                *(oldestPrev ? &oldestPrev->CacheNext : &Cache->Pages) = oldest->CacheNext;
                // If the evicted page was also the insertion point, find it again
                if( prev == oldest )
                        IOCache_int_GetPage(Cache, Sector, &prev, NULL);
                page = oldest;
                page_map = MM_MapTemp( page->BasePhys );
                // Flush any dirty sectors before the page is reused
                if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
                {
                        for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
                        {
                                if( !(page->DirtySectors & (1 << i)) )
                                        continue;
                                Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
                                        page_map + i * Cache->SectorSize);
                        }
                }
        }

        // Insert the (re)initialised page into the cache's sorted list
        page->CacheNext = prev->CacheNext;
        prev->CacheNext = page;
        
        page->Owner = Cache;
        page->LastAccess = now();
        
        page->BaseOffset = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
        page->PresentSectors = 0;
        page->DirtySectors = 0;
        
        memcpy( page_map + offset, Buffer, Cache->SectorSize );
        page->PresentSectors |= (1 << offset/Cache->SectorSize);
        MM_FreeTemp( page_map );
        
        #else
        tIOCache_Ent    *ent, *prev;
        tIOCache_Ent    *new;
        tIOCache_Ent    *oldest = NULL, *oldestPrev = NULL;
        prev = (tIOCache_Ent*)&Cache->Entries;
        for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
        {
                // Is it already here?
                if( ent->Num == Sector ) {
                        Mutex_Release( &Cache->Lock );
                        return 0;
                }
                
                // Check if we have found the oldest entry
                if( !oldest || oldest->LastAccess > ent->LastAccess ) {
                        oldest = ent;
                        oldestPrev = prev;
                }
                
                // Here we go!
                if(ent->Num > Sector)
                        break;
        }
        
        // Create the new entry
        new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
        if( !new ) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        new->Next = ent;
        new->Num = Sector;
        new->LastAccess = now();
        new->LastWrite = 0;     // Zero is special, it means unmodified
        memcpy(new->Data, Buffer, Cache->SectorSize);
        
        // Have we reached the maximum cached entries?
        if( Cache->CacheUsed == Cache->CacheSize )
        {
                tIOCache_Ent    *savedPrev = prev;
                // If so, continue the search for the least recently accessed entry
                for( ; ent; prev = ent, ent = ent->Next )
                {
                        // Check if we have found the oldest entry
                        if( !oldest || oldest->LastAccess > ent->LastAccess ) {
                                oldest = ent;
                                oldestPrev = prev;
                        }
                }
                if( !oldest ) {
                        Log_Error("IOCache", "Cache full, but also empty");
                        Mutex_Release( &Cache->Lock );
                        return -1;
                }
                // Remove from list, write back and free
                oldestPrev->Next = oldest->Next;
                if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
                        Cache->Write(Cache->ID, oldest->Num, oldest->Data);
                free(oldest);
                
                // Decrement the used count
                Cache->CacheUsed --;
                
                // Restore `prev`
                prev = savedPrev;
        }
        
        // Append to list
        prev->Next = new;
        Cache->CacheUsed ++;
        #endif
        
        // Release the lock
        Mutex_Release( &Cache->Lock );
        
        // Return success
        return 1;
}
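
/*
 * Sketch of a matching write callback (hypothetical driver code; the exact
 * tIOCache_WriteCallback prototype is declared in iocache.h, but judging by
 * the calls in this file it takes the opaque ID, an absolute sector number
 * and a pointer to one sector of data):
 *
 *      void MyDisk_WriteSector(void *ID, Uint64 Sector, const void *Data)
 *      {
 *              tMyDisk *disk = ID;
 *              MyDisk_DoWrite( disk, Sector, Data );
 *      }
 */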

/**
 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
 * \brief Write to a cached sector
 */
int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
{
        // Sanity Check!
        if(!Cache || !Buffer)
                return -1;
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return -1;
        }
        
        #if IOCACHE_USE_PAGES
        tIOCache_PageInfo       *page;
        size_t  offset;
        page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
        if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
        {
                page->LastAccess = now();
                char *tmp = MM_MapTemp( page->BasePhys );
                memcpy( tmp + offset, Buffer, Cache->SectorSize );
                MM_FreeTemp( tmp );
                
                if(Cache->Mode == IOCACHE_WRITEBACK) {
                        Cache->Write(Cache->ID, Sector, Buffer);
                }
                else {
                        page->DirtySectors |= (1 << offset/Cache->SectorSize);
                }
                
                Mutex_Release( &Cache->Lock );
                return 1;
        }
        #else
        // Search the list
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                // Have we found what we are looking for?
                if( ent->Num == Sector ) {
                        memcpy(ent->Data, Buffer, Cache->SectorSize);
                        ent->LastAccess = ent->LastWrite = now();
                        
                        if(Cache->Mode == IOCACHE_WRITEBACK) {
                                Cache->Write(Cache->ID, Sector, Buffer);
                                ent->LastWrite = 0;
                        }
                        
                        Mutex_Release( &Cache->Lock );
                        return 1;
                }
                // It's a sorted list, so as soon as we go past `Sector` we know
                // it's not there
                if(ent->Num > Sector)   break;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
        return 0;
}
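
/*
 * Note on write modes, as implemented above: IOCACHE_WRITEBACK pushes the new
 * data to the backing store immediately, while other modes only set the dirty
 * bit and leave the device write to IOCache_Flush() or to eviction (both of
 * which skip the write for IOCACHE_VIRTUAL caches). A return of 0 means the
 * sector is not cached at all, so the caller still owns the device write,
 * e.g. (sketch, reusing the hypothetical MyDisk_WriteSector):
 *
 *      if( IOCache_Write( cache, sector, buf ) != 1 )
 *              MyDisk_WriteSector( &gMyDisk_Info, sector, buf );
 */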

/**
 * \fn void IOCache_Flush( tIOCache *Cache )
 * \brief Flush a cache
 */
void IOCache_Flush( tIOCache *Cache )
{
        if( Cache->Mode == IOCACHE_VIRTUAL )    return;
        
        // Lock
        Mutex_Acquire( &Cache->Lock );
        if(Cache->CacheSize == 0) {
                Mutex_Release( &Cache->Lock );
                return;
        }
        
        // Write All
        #if IOCACHE_USE_PAGES
        for( tIOCache_PageInfo *page = Cache->Pages; page; page = page->CacheNext )
        {
                // Flush any page with dirty sectors, then mark it clean
                if( page->DirtySectors )
                {
                        char *page_map = MM_MapTemp( page->BasePhys );
                        for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
                        {
                                if( !(page->DirtySectors & (1 << i)) )
                                        continue;
                                Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
                                        page_map + i * Cache->SectorSize);
                        }
                        MM_FreeTemp(page_map);
                        page->DirtySectors = 0;
                }
        }
        #else
        for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
        {
                Cache->Write(Cache->ID, ent->Num, ent->Data);
                ent->LastWrite = 0;
        }
        #endif
        
        Mutex_Release( &Cache->Lock );
}

/**
 * \fn void IOCache_Destroy( tIOCache *Cache )
 * \brief Destroy a cache
 */
void IOCache_Destroy( tIOCache *Cache )
{
        IOCache_Flush(Cache);
        
        // Remove from list
        SHORTLOCK( &glIOCache_Caches );
        {
                tIOCache        *cache;
                tIOCache        *prev_cache = (tIOCache*)&gIOCache_Caches;
                for(cache = gIOCache_Caches;
                        cache;
                        prev_cache = cache, cache = cache->Next )
                {
                        if(cache == Cache) {
                                prev_cache->Next = cache->Next;
                                break;
                        }
                }
        }
        SHORTREL( &glIOCache_Caches );
        
        free(Cache);
}
