3 * - By thePowersGang (John Hodge)
// Select the page-granular cache implementation over the legacy
// per-sector entry list (both code paths are kept below behind #if).
11 #define IOCACHE_USE_PAGES 1
// Forward typedefs for the two cache bookkeeping records
14 typedef struct sIOCache_Ent tIOCache_Ent;
15 typedef struct sIOCache_PageInfo tIOCache_PageInfo;
// Describes one physical page held by a cache.
// NOTE(review): only some fields are visible in this view of the file;
// BaseOffset/BasePhys/DirtySectors/LastAccess are used by the code below
// but declared on lines not shown here.
27 struct sIOCache_PageInfo
// Next page in the owning cache's list (sorted by BaseOffset, per GetPage)
29 tIOCache_PageInfo *CacheNext;
// Next page in the global page list (gIOCache_GlobalPages)
30 tIOCache_PageInfo *GlobalNext;
// Bitmap: bit N set => sector N within this page holds valid data
36 Uint32 PresentSectors;
// (fields of the tIOCache struct, whose opening lines are not shown)
// Callback used to push a sector back to the backing device
47 tIOCache_WriteCallback Write;
// Head of the cached-page list (page mode)
51 tIOCache_PageInfo *Pages;
// Head of the cached-sector entry list (legacy mode)
53 tIOCache_Ent *Entries;
// Locate the page covering `Sector`; fills *Offset with the sector's byte
// offset inside the page, and *Prev with the preceding list node (see impl)
58 tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset);
// Global registry of all caches, protected by glIOCache_Caches
62 tShortSpinlock glIOCache_Caches;
63 tIOCache *gIOCache_Caches = NULL;
64 int giIOCache_NumCaches = 0;
// System-wide list of cached pages (for global reclaim)
66 tIOCache_PageInfo *gIOCache_GlobalPages;
71 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
72 * \brief Creates a new IO Cache
74 tIOCache *IOCache_Create( tIOCache_WriteCallback Write, void *ID, int SectorSize, int CacheSize )
78 if( SectorSize < 512 )
80 if( SectorSize > PAGE_SIZE )
83 // TODO: Check that SectorSize is a power of two
85 tIOCache *ret = calloc( 1, sizeof(tIOCache) );
89 ret->SectorSize = SectorSize;
90 ret->Mode = IOCACHE_WRITEBACK;
93 ret->CacheSize = CacheSize;
96 SHORTLOCK( &glIOCache_Caches );
97 ret->Next = gIOCache_Caches;
98 gIOCache_Caches = ret;
99 SHORTREL( &glIOCache_Caches );
105 #if IOCACHE_USE_PAGES
// Find the cached page containing `Sector`. Fills *Offset with the sector's
// byte offset inside its page; the match/return and *Prev assignment are on
// lines not shown in this view — presumably *Prev receives the node before
// the match so callers can unlink/insert (verify against full source).
106 tIOCache_PageInfo *IOCache_int_GetPage(tIOCache *Cache, Uint64 Sector, tIOCache_PageInfo **Prev, size_t *Offset)
// Page-aligned byte offset on the device that this sector falls in
108 Uint64 wanted_base = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
110 *Offset = (Sector*Cache->SectorSize) % PAGE_SIZE;
// `prev` starts as a fake node aliasing the list head pointer
112 tIOCache_PageInfo *prev = (void*)&Cache->Pages;
113 for( tIOCache_PageInfo *page = Cache->Pages; page; prev = page, page = page->CacheNext )
// List is sorted by BaseOffset: skip lower entries, stop once past the target
115 if(page->BaseOffset < wanted_base) continue;
116 if(page->BaseOffset > wanted_base) break;
126 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
127 * \brief Read from a cached sector
// Returns (per visible structure): hit => Buffer filled from the cache;
// miss/bad args => caller must read the device itself. The actual return
// statements are on lines not shown in this view — TODO confirm values.
129 int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
132 ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
// Sanity check: NULL cache or buffer is rejected
135 if(!Cache || !Buffer) {
// A zero-size cache can never hold the sector
141 Mutex_Acquire( &Cache->Lock );
142 if(Cache->CacheSize == 0) {
143 Mutex_Release( &Cache->Lock );
148 #if IOCACHE_USE_PAGES
149 tIOCache_PageInfo *page;
151 page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
// Hit only if the page exists AND this sector's "present" bit is set
152 if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
154 page->LastAccess = now();
// NOTE(review): `tmp` must be released with MM_FreeTemp — presumably done
// on the hidden line between 156 and 158; verify against full source.
155 char *tmp = MM_MapTemp( page->BasePhys );
156 memcpy( Buffer, tmp + offset, Cache->SectorSize );
158 Mutex_Release( &Cache->Lock );
// Legacy (non-page) path: linear scan of the sorted entry list
164 for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
166 // Have we found what we are looking for?
167 if( ent->Num == Sector ) {
168 memcpy(Buffer, ent->Data, Cache->SectorSize);
169 ent->LastAccess = now();
170 Mutex_Release( &Cache->Lock );
174 // It's a sorted list, so as soon as we go past `Sector` we know
176 if(ent->Num > Sector) break;
// Cache miss
180 Mutex_Release( &Cache->Lock );
186 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
187 * \brief Cache a sector
// Inserts (or refreshes) a sector in the cache, evicting the least recently
// accessed page/entry when full. Several lines (branches, returns, frees)
// are not visible in this view; comments below describe only what is shown.
189 int IOCache_Add( tIOCache *Cache, Uint64 Sector, const void *Buffer )
193 if(!Cache || !Buffer)
197 Mutex_Acquire( &Cache->Lock );
198 if(Cache->CacheSize == 0) {
199 Mutex_Release( &Cache->Lock );
204 #if IOCACHE_USE_PAGES
207 tIOCache_PageInfo *prev;
208 tIOCache_PageInfo *page = IOCache_int_GetPage(Cache, Sector, &prev, &offset);
// Bit for this sector within its page's Present/Dirty bitmaps
211 Uint32 mask = (1 << offset/Cache->SectorSize);
// ret distinguishes "newly cached" from "already present" (page != NULL here)
212 int ret = !(page->PresentSectors & mask);
// Copy the sector into the existing page and mark it present
215 page_map = MM_MapTemp( page->BasePhys );
216 memcpy( page_map + offset, Buffer, Cache->SectorSize );
217 MM_FreeTemp( page_map );
218 page->PresentSectors |= mask;
220 Mutex_Release( &Cache->Lock );
// Room left: allocate a fresh physical page for a new cache page
223 else if( Cache->CacheUsed <= Cache->CacheSize )
225 page = malloc( sizeof(tIOCache_PageInfo) );
226 page->BasePhys = MM_AllocPhys();
227 page_map = MM_MapTemp( page->BasePhys );
// Link into the global page list
229 page->GlobalNext = gIOCache_GlobalPages;
230 gIOCache_GlobalPages = page;
// Cache full: find the least-recently-accessed page to evict
234 tIOCache_PageInfo *oldest = Cache->Pages, *oldestPrev = NULL;
// NOTE(review): this loop advances `prev`, but the unlink below uses
// `oldestPrev` — presumably `oldestPrev` is updated inside the if-body
// (hidden lines 238-241), otherwise this is a bug; verify.
235 for( tIOCache_PageInfo *ent = Cache->Pages; ent; prev = ent, ent = ent->CacheNext )
237 if( ent->LastAccess < oldest->LastAccess ) {
242 // Remove oldest from list
// oldestPrev == NULL means the head itself is the oldest
243 *(oldestPrev ? &oldestPrev->CacheNext : &Cache->Pages) = oldest->CacheNext;
245 page_map = MM_MapTemp( page->BasePhys );
// Write back the evicted page's dirty sectors before reuse
247 if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
249 for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
250 Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
251 page_map + i * Cache->SectorSize);
// Insert the (new or recycled) page after `prev` to keep the list sorted
256 page->CacheNext = prev->CacheNext;
257 prev->CacheNext = page;
// Initialise the page's metadata and copy in the new sector
260 page->LastAccess = now();
262 page->BaseOffset = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
263 page->PresentSectors = 0;
264 page->DirtySectors = 0;
266 memcpy( page_map + offset, Buffer, Cache->SectorSize );
268 MM_FreeTemp(page_map);
// ---- Legacy (non-page) path ----
271 tIOCache_Ent *ent, *prev;
273 tIOCache_Ent *oldest = NULL, *oldestPrev;
// `prev` aliases the list head so insertion works at the front too
274 prev = (tIOCache_Ent*)&Cache->Entries;
275 for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
277 // Is it already here?
278 if( ent->Num == Sector ) {
279 Mutex_Release( &Cache->Lock );
283 // Check if we have found the oldest entry
284 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Sorted list: `prev` now marks the insertion point
290 if(ent->Num > Sector)
294 // Create the new entry
// Entry struct carries its data inline (flexible trailing buffer)
295 new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
298 new->LastAccess = now();
299 new->LastWrite = 0; // Zero is special, it means unmodified
300 memcpy(new->Data, Buffer, Cache->SectorSize);
302 // Have we reached the maximum cached entries?
303 if( Cache->CacheUsed == Cache->CacheSize )
// Keep the insertion point while the eviction scan reuses `prev`
305 tIOCache_Ent *savedPrev = prev;
306 oldestPrev = (tIOCache_Ent*)&Cache->Entries;
307 // If so, search for the least recently accessed entry
308 for( ; ent; prev = ent, ent = ent->Next )
310 // Check if we have found the oldest entry
311 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Should be unreachable: a full cache must contain at least one entry
317 Log_Error("IOCache", "Cache full, but also empty");
320 // Remove from list, write back and free
321 oldestPrev->Next = oldest->Next;
// Only write back entries modified since their last write-out
322 if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
323 Cache->Write(Cache->ID, oldest->Num, oldest->Data);
326 // Decrement the used count
339 Mutex_Release( &Cache->Lock );
346 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
347 * \brief Read from a cached sector
349 int IOCache_Write( tIOCache *Cache, Uint64 Sector, const void *Buffer )
352 if(!Cache || !Buffer)
355 Mutex_Acquire( &Cache->Lock );
356 if(Cache->CacheSize == 0) {
357 Mutex_Release( &Cache->Lock );
361 #if IOCACHE_USE_PAGES
362 tIOCache_PageInfo *page;
364 page = IOCache_int_GetPage(Cache, Sector, NULL, &offset);
365 if( page && (page->PresentSectors & (1 << offset/Cache->SectorSize)) )
368 page->LastAccess = now();
369 char *tmp = MM_MapTemp( page->BasePhys );
370 memcpy( tmp + offset, Buffer, Cache->SectorSize );
373 if(Cache->Mode == IOCACHE_WRITEBACK) {
374 Cache->Write(Cache->ID, Sector, Buffer);
377 page->DirtySectors |= (1 << offset/Cache->SectorSize);
380 Mutex_Release( &Cache->Lock );
386 for( tIOCache_Ent &ent = Cache->Entries; ent; ent = ent->Next )
388 // Have we found what we are looking for?
389 if( ent->Num == Sector ) {
390 memcpy(ent->Data, Buffer, Cache->SectorSize);
391 ent->LastAccess = ent->LastWrite = now();
393 if(Cache->Mode == IOCACHE_WRITEBACK) {
394 Cache->Write(Cache->ID, Sector, Buffer);
398 Mutex_Release( &Cache->Lock );
401 // It's a sorted list, so as soon as we go past `Sector` we know
403 if(ent->Num > Sector) break;
407 Mutex_Release( &Cache->Lock );
412 * \fn void IOCache_Flush( tIOCache *Cache )
413 * \brief Flush a cache
// Writes all cached data back to the device via Cache->Write.
415 void IOCache_Flush( tIOCache *Cache )
// Virtual caches have no backing store to flush to
417 if( Cache->Mode == IOCACHE_VIRTUAL ) return;
420 Mutex_Acquire( &Cache->Lock );
421 if(Cache->CacheSize == 0) {
422 Mutex_Release( &Cache->Lock );
427 #if IOCACHE_USE_PAGES
// Write each dirty page back, one sector at a time
428 for( tIOCache_PageInfo *page = Cache->Pages; page; page = page->CacheNext )
431 char *page_map = MM_MapTemp( page->BasePhys );
// NOTE(review): the VIRTUAL re-check is redundant — the function already
// returned above for that mode
432 if( page->DirtySectors && Cache->Mode != IOCACHE_VIRTUAL )
434 for( int i = 0; i < PAGE_SIZE/Cache->SectorSize; i ++ )
435 Cache->Write(Cache->ID, page->BaseOffset/Cache->SectorSize+i,
436 page_map + i * Cache->SectorSize);
438 MM_FreeTemp(page_map);
// Legacy path: write back every entry (visible code does not check
// LastWrite here — presumably unconditional by design; verify)
441 for( tIOCache_Ent *ent = Cache->Entries; ent; ent = ent->Next )
443 Cache->Write(Cache->ID, ent->Num, ent->Data);
448 Mutex_Release( &Cache->Lock );
452 * \fn void IOCache_Destroy( tIOCache *Cache )
453 * \brief Destroy a cache
// Flushes dirty data, unlinks the cache from the global registry, then
// (on lines not visible here) presumably frees its pages/entries — verify.
455 void IOCache_Destroy( tIOCache *Cache )
// Push any dirty data to the device before tearing down
457 IOCache_Flush(Cache);
// Unlink from the global cache list under the registry spinlock
460 SHORTLOCK( &glIOCache_Caches );
// `prev_cache` aliases the list head pointer so the unlink below also
// works when `Cache` is the first node
463 tIOCache *prev_cache = (tIOCache*)&gIOCache_Caches;
464 for(cache = gIOCache_Caches;
466 prev_cache = cache, cache = cache->Next )
469 prev_cache->Next = cache->Next;
474 SHORTREL( &glIOCache_Caches );