5 * By thePowersGang (John Hodge)
7 * TODO: Convert to use spare physical pages instead
12 #define IOCACHE_USE_PAGES 1
15 typedef struct sIOCache_Ent tIOCache_Ent;
16 typedef struct sIOCache_PageInfo tIOCache_PageInfo;
// Metadata for one cached physical page (page-based mode, IOCACHE_USE_PAGES).
// NOTE(review): struct body is partially elided in this listing.
28 struct sIOCache_PageInfo
// Next page in the owning cache's list (kept sorted by BaseOffset — see
// the early-break logic in IOCache_int_GetPage)
30 	tIOCache_PageInfo *CacheNext;
// Next page in the global page list headed by gIOCache_GlobalPages
31 	tIOCache_PageInfo *GlobalNext;
// Bitmap: which sector slots within this page currently hold valid data
// (bit index = byte offset / SectorSize)
37 	Uint32	PresentSectors;
// Callback used to flush a sector back to the backing device
48 	tIOCache_WriteCallback	Write;
// Head of this cache's page list (page-based mode)
52 	tIOCache_PageInfo	*Pages;
// Head of this cache's sector-entry list (entry-based mode); sorted by
// sector number (readers break out once ent->Num > Sector)
54 	tIOCache_Ent	*Entries;
// Find the cached page covering `Sector`; also yields the preceding list
// node (for insertion/unlink) and the sector's byte offset within the page.
59 tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset);
// Spinlock guarding the global cache list below
63 tShortSpinlock	glIOCache_Caches;
// Linked list of all live caches, plus a count
64 tIOCache	*gIOCache_Caches = NULL;
65  int	giIOCache_NumCaches = 0;
// Global list of all cached pages across every cache
67 tIOCache_PageInfo	*gIOCache_GlobalPages;
72 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
73 * \brief Creates a new IO Cache
// Validates the sector size, allocates the cache descriptor, and links it
// into the global cache list under glIOCache_Caches.
// NOTE(review): several lines (error returns, field assignments) are elided
// in this listing; the visible code rejects SectorSize < 512 or > PAGE_SIZE.
75 tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
// Reject sector sizes smaller than a traditional disk sector
79 	if( SectorSize < 512 )
// A sector must fit within one physical page for page-based caching
81 	if( SectorSize > PAGE_SIZE )
84 	// TODO: Check that SectorSize is a power of two
86 	tIOCache	*ret = calloc( 1, sizeof(tIOCache) );
90 	ret->SectorSize = SectorSize;
// Default to write-back semantics (see IOCache_Write's use of Mode)
91 	ret->Mode = IOCACHE_WRITEBACK;
94 	ret->CacheSize = CacheSize;
// Register in the global cache list (spinlock-protected)
97 	SHORTLOCK( &glIOCache_Caches );
98 	ret->Next = gIOCache_Caches;
99 	gIOCache_Caches = ret;
100 	SHORTREL( &glIOCache_Caches );
106 #if IOCACHE_USE_PAGES
// Locate the cached page whose page-aligned base offset covers `Sector`.
// On return, *Offset is the sector's byte offset within the page; *Prev
// (when requested) receives the list node preceding the match/insertion
// point. The page list is sorted by BaseOffset, so the scan can stop early.
// NOTE(review): the tail of this function (match/miss returns) is elided
// in this listing.
107 tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset)
// Page-aligned byte offset of the page containing this sector
109 	Uint64	wanted_base = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
111 	*Offset = (Sector*Cache->SectorSize) % PAGE_SIZE;
// `prev` starts as a fake node aliasing &Cache->Pages so the head can be
// treated uniformly with interior nodes
113 	tIOCache_PageInfo	*prev = (void*)&Cache->Pages;
114 	for( tIOCache_PageInfo *page = Cache->Pages; page; prev = page, page = page->CacheNext )
// Sorted list: skip pages before the wanted base, stop once past it
116 		if(page->BaseOffset < wanted_base)	continue;
117 		if(page->BaseOffset > wanted_base)	break;
127 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
128 * \brief Read from a cached sector
// Copies the cached contents of `Sector` into Buffer on a hit; on a miss
// (or disabled cache) the elided return paths presumably report failure —
// TODO confirm return values against the full source.
130 int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
133 	ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
// Sanity check the arguments
136 	if(!Cache || !Buffer) {
142 	Mutex_Acquire( &Cache->Lock );
// A zero-size cache is effectively disabled
143 	if(Cache->CacheSize == 0) {
144 		Mutex_Release( &Cache->Lock );
149 	#if IOCACHE_USE_PAGES
150 	tIOCache_PageInfo	*page;
152 	page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
// Hit only if the page exists AND this specific sector slot is populated
153 	if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
155 		page->LastAccess = now();
// Map the physical page temporarily to copy the sector out
// NOTE(review): the matching MM_FreeTemp is in an elided line — verify
156 		char *tmp = MM_MapTemp( page->BasePhys );
157 		memcpy( Buffer, tmp + offset, Cache->SectorSize );
159 		Mutex_Release( &Cache->Lock );
// Entry-based fallback: linear scan of the sorted sector list
165 	for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
167 		// Have we found what we are looking for?
168 		if( ent->Num == Sector ) {
169 			memcpy(Buffer, ent->Data, Cache->SectorSize);
170 			ent->LastAccess = now();
171 			Mutex_Release( &Cache->Lock );
175 		// It's a sorted list, so as soon as we go past `Sector` we know
177 		if(ent->Num > Sector)	break;
// Miss: fell off the list (or broke early)
181 	Mutex_Release( &Cache->Lock );
187 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
188 * \brief Cache a sector
// Inserts `Sector`'s data into the cache. In page mode: update an existing
// page, allocate a new page while under the size limit, or evict the
// least-recently-accessed page (writing back dirty sectors first). In
// entry mode: insert into the sorted entry list, evicting the oldest
// entry when the cache is full.
190 int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
194 	if(!Cache || !Buffer)
198 	Mutex_Acquire( &Cache->Lock );
// Disabled cache — nothing to do
199 	if(Cache->CacheSize == 0) {
200 		Mutex_Release( &Cache->Lock );
205 	#if IOCACHE_USE_PAGES
208 	tIOCache_PageInfo	*prev;
// `prev` is the insertion point returned by GetPage (node before where the
// page belongs in the sorted list)
209 	tIOCache_PageInfo	*page = IOCache_int_GetPage(Cache, Sector, &prev, &offset);
// Bit for this sector slot within its page
212 		Uint32	mask = (1 << offset/Cache->SectorSize);
// Return value indicates whether the sector was newly added (not already present)
213 		 int	ret = !(page->PresentSectors & mask);
216 		page_map = MM_MapTemp( page->BasePhys );
217 		memcpy( page_map + offset, Buffer, Cache->SectorSize );
218 		MM_FreeTemp( page_map );
219 		page->PresentSectors |= mask;
221 		Mutex_Release( &Cache->Lock );
// Still under the page budget: allocate a fresh physical page
224 	else if( Cache->CacheUsed <= Cache->CacheSize )
226 		page = malloc( sizeof(tIOCache_PageInfo) );
227 		page->BasePhys = MM_AllocPhys();
228 		page_map = MM_MapTemp( page->BasePhys );
// Link into the global page list
230 		page->GlobalNext = gIOCache_GlobalPages;
231 		gIOCache_GlobalPages = page;
// Cache full: find the least-recently-accessed page to evict
235 		tIOCache_PageInfo *oldest = Cache->Pages, *oldestPrev = NULL;
// NOTE(review): this loop advances `prev` (which is later reused at the
// elided insertion at lines 257-258) while `oldestPrev` is only updated in
// elided lines — confirm against the full source that `oldestPrev` tracks
// the node before `oldest` and that clobbering `prev` here is intended.
236 		for( tIOCache_PageInfo *ent = Cache->Pages; ent; prev = ent, ent = ent->CacheNext )
238 			if( ent->LastAccess < oldest->LastAccess ) {
243 		// Remove oldest from list
244 		*(oldestPrev ? &oldestPrev->CacheNext : &Cache->Pages) = oldest->CacheNext;
246 		page_map = MM_MapTemp( page->BasePhys );
// Write back any dirty sectors of the evicted page before reuse
248 		if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
250 			for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
251 				Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
252 					page_map + i * Cache->SectorSize);
// Splice the (new or recycled) page into the sorted list after `prev`
257 	page->CacheNext = prev->CacheNext;
258 	prev->CacheNext = page;
261 	page->LastAccess = now();
// Page-aligned base for the sector being inserted
263 	page->BaseOffset = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
264 	page->PresentSectors = 0;
265 	page->DirtySectors = 0;
267 	memcpy( page_map + offset, Buffer, Cache->SectorSize );
// --- Entry-based (non-page) path ---
270 	tIOCache_Ent	*ent, *prev;
272 	tIOCache_Ent	*oldest = NULL, *oldestPrev;
// Fake head node so insertion at the list head needs no special case
273 	prev = (tIOCache_Ent*)&Cache->Entries;
274 	for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
276 		// Is it already here?
277 		if( ent->Num == Sector ) {
278 			Mutex_Release( &Cache->Lock );
282 		// Check if we have found the oldest entry
283 		if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Sorted list: past this point is the insertion position
289 		if(ent->Num > Sector)
293 	// Create the new entry
294 	new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
297 	new->LastAccess = now();
298 	new->LastWrite = 0;	// Zero is special, it means unmodified
299 	memcpy(new->Data, Buffer, Cache->SectorSize);
301 	// Have we reached the maximum cached entries?
302 	if( Cache->CacheUsed == Cache->CacheSize )
304 		tIOCache_Ent	*savedPrev = prev;
305 		oldestPrev = (tIOCache_Ent*)&Cache->Entries;
306 		// If so, search for the least recently accessed entry
// Continue the scan from the insertion point to cover the rest of the list
307 		for( ; ent; prev = ent, ent = ent->Next )
309 			// Check if we have found the oldest entry
310 			if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Should be impossible: the cache claims to be full yet has no entries
316 			Log_Error("IOCache", "Cache full, but also empty");
319 		// Remove from list, write back and free
320 		oldestPrev->Next = oldest->Next;
// Only write back if the entry was modified (LastWrite != 0) and the
// cache is not purely virtual
321 		if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
322 			Cache->Write(Cache->ID, oldest->Num, oldest->Data);
325 		// Decrement the used count
338 	Mutex_Release( &Cache->Lock );
345 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
346 * \brief Write to a cached sector
// Updates the cached copy of `Sector` (if present) with Buffer's contents.
// In IOCACHE_WRITEBACK mode the data is also pushed straight to the device
// via Cache->Write; otherwise the sector is marked dirty for a later flush.
// NOTE(review): miss/return paths are elided in this listing.
348 int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
351 	if(!Cache || !Buffer)
354 	Mutex_Acquire( &Cache->Lock );
// Disabled cache — nothing cached to update
355 	if(Cache->CacheSize == 0) {
356 		Mutex_Release( &Cache->Lock );
360 	#if IOCACHE_USE_PAGES
361 	tIOCache_PageInfo	*page;
363 	page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
// Hit only if the page exists AND this sector slot is populated
364 	if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
367 		page->LastAccess = now();
// Map the physical page temporarily to update the cached copy
368 		char *tmp = MM_MapTemp( page->BasePhys );
369 		memcpy( tmp + offset, Buffer, Cache->SectorSize );
372 		if(Cache->Mode == IOCACHE_WRITEBACK) {
373 			Cache->Write(Cache->ID, Sector, Buffer);
// Not written through — remember this sector needs flushing
376 			page->DirtySectors |= (1 << offset/Cache->SectorSize);
379 		Mutex_Release( &Cache->Lock );
// Entry-based fallback: linear scan of the sorted sector list.
// BUGFIX: was `tIOCache_Ent &ent` — C++ reference syntax, invalid in C;
// must be a pointer to match `ent->Next` / `ent->Num` below.
385 	for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
387 		// Have we found what we are looking for?
388 		if( ent->Num == Sector ) {
389 			memcpy(ent->Data, Buffer, Cache->SectorSize);
// Non-zero LastWrite marks the entry as modified (see IOCache_Add eviction)
390 			ent->LastAccess = ent->LastWrite = now();
392 			if(Cache->Mode == IOCACHE_WRITEBACK) {
393 				Cache->Write(Cache->ID, Sector, Buffer);
397 			Mutex_Release( &Cache->Lock );
400 		// It's a sorted list, so as soon as we go past `Sector` we know
402 		if(ent->Num > Sector)	break;
// Sector not cached
406 	Mutex_Release( &Cache->Lock );
411 * \fn void IOCache_Flush( tIOCache *Cache )
412 * \brief Flush a cache
// Writes every dirty cached page (or, in entry mode, every entry) back to
// the device via Cache->Write. Purely virtual caches have nothing backing
// them, so they are skipped up front.
414 void IOCache_Flush( tIOCache *Cache )
416 	if( Cache->Mode == IOCACHE_VIRTUAL )	return;
419 	Mutex_Acquire( &Cache->Lock );
// Disabled cache — nothing to flush
420 	if(Cache->CacheSize == 0) {
421 		Mutex_Release( &Cache->Lock );
426 	#if IOCACHE_USE_PAGES
427 	for( tIOCache_PageInfo *page = Cache->Pages; page; page = page->CacheNext )
430 		char *page_map = MM_MapTemp( page->BasePhys );
// NOTE(review): the Mode != IOCACHE_VIRTUAL test is redundant after the
// early return above; also DirtySectors is not visibly cleared here —
// confirm against elided lines.
431 		if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
433 			for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
434 				Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
435 					page_map + i * Cache->SectorSize);
437 		MM_FreeTemp(page_map);
// Entry-based fallback: write back every entry unconditionally
440 	for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
442 		Cache->Write(Cache->ID, ent->Num, ent->Data);
447 	Mutex_Release( &Cache->Lock );
451 * \fn void IOCache_Destroy( tIOCache *Cache )
452 * \brief Destroy a cache
// Flushes all dirty data, then unlinks the cache from the global list
// under the glIOCache_Caches spinlock.
// NOTE(review): the loop condition and freeing of entries/pages are in
// elided lines.
454 void IOCache_Destroy( tIOCache *Cache )
// Push outstanding dirty data to the device before tearing down
456 	IOCache_Flush(Cache);
459 	SHORTLOCK( &glIOCache_Caches );
// Fake head node aliasing &gIOCache_Caches so unlinking the list head
// needs no special case
462 	tIOCache	*prev_cache = (tIOCache*)&gIOCache_Caches;
463 	for(cache = gIOCache_Caches;
465 		prev_cache = cache, cache = cache->Next )
// Unlink this cache from the global list
468 			prev_cache->Next = cache->Next;
473 	SHORTREL( &glIOCache_Caches );