5 * By thePowersGang (John Hodge)
7 * TODO: Convert to use spare physical pages instead
// Forward typedefs for the cache-entry and page-info records
14 typedef struct sIOCache_Ent tIOCache_Ent;
15 typedef struct sIOCache_PageInfo tIOCache_PageInfo;
// Per-page tracking record, linked into both a global page list and a
// per-cache list (struct body appears truncated in this view)
27 struct sIOCache_PageInfo
29 tIOCache_PageInfo *GlobalNext;
30 tIOCache_PageInfo *CacheNext;
// Per-cache fields (shown partially): write-back callback used to push
// dirty sectors to the backing device, and the sorted entry list head
43 tIOCache_WriteCallback Write;
46 tIOCache_Ent *Entries;
// File-scope state: spinlock guarding the global cache list, its head,
// the cache count, and the global page-info list
50 tShortSpinlock glIOCache_Caches;
51 tIOCache *gIOCache_Caches = NULL;
52 int giIOCache_NumCaches = 0;
53 tIOCache_PageInfo *gIOCache_GlobalPages;
57 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
58 * \brief Creates a new IO Cache
// Allocates and initialises a cache object, then links it onto the global
// cache list under the short spinlock.
// NOTE(review): the view is truncated — the calloc-failure check and the
// initialisation of Write/ID/CacheUsed are presumably in the missing lines;
// confirm against the full file.
60 tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
62 tIOCache *ret = calloc( 1, sizeof(tIOCache) );
// calloc zero-fills, so list pointers and counters start out NULL/0
68 ret->SectorSize = SectorSize;
69 ret->Mode = IOCACHE_WRITEBACK; // default write policy
72 ret->CacheSize = CacheSize; // maximum number of cached sectors
// Prepend to the global cache list (spinlock protects the list head)
75 SHORTLOCK( &glIOCache_Caches );
76 ret->Next = gIOCache_Caches;
77 gIOCache_Caches = ret;
78 SHORTREL( &glIOCache_Caches );
85 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
86 * \brief Read from a cached sector
// Cache-lookup only: on a hit, copies SectorSize bytes into Buffer and
// touches the entry's LastAccess for LRU tracking. The entry list is kept
// sorted by sector number, so the scan can stop early on a miss.
// NOTE(review): the return statements fall in lines not visible here.
88 int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
92 ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
// Sanity check: reject NULL cache or buffer
95 if(!Cache || !Buffer) {
// A zero-size cache holds nothing; release and bail out early
101 Mutex_Acquire( &Cache->Lock );
102 if(Cache->CacheSize == 0) {
103 Mutex_Release( &Cache->Lock );
// Linear scan of the sorted entry list
109 for( ent = Cache->Entries; ent; ent = ent->Next )
111 // Have we found what we are looking for?
112 if( ent->Num == Sector ) {
113 memcpy(Buffer, ent->Data, Cache->SectorSize);
114 ent->LastAccess = now(); // touch for LRU eviction ordering
115 Mutex_Release( &Cache->Lock );
119 // It's a sorted list, so as soon as we go past `Sector` we know
121 if(ent->Num > Sector) break;
// Cache miss
124 Mutex_Release( &Cache->Lock );
130 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
131 * \brief Cache a sector
// Inserts Sector into the sorted entry list. If the cache is already at
// capacity, the least-recently-accessed entry is evicted first — written
// back to the device when dirty (LastWrite != 0) and the cache is not
// IOCACHE_VIRTUAL, then freed.
133 int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
135 tIOCache_Ent *ent, *prev;
137 tIOCache_Ent *oldest = NULL, *oldestPrev;
140 if(!Cache || !Buffer)
144 Mutex_Acquire( &Cache->Lock );
145 if(Cache->CacheSize == 0) {
146 Mutex_Release( &Cache->Lock );
// Walk the sorted list for Sector, remembering the insertion point and
// tracking the oldest entry seen so far for possible eviction.
// `prev` starts as the list-head pointer cast to an entry — this relies
// on Next being the first member of tIOCache_Ent (confirm in struct def).
151 prev = (tIOCache_Ent*)&Cache->Entries;
152 for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
154 // Is it already here?
155 if( ent->Num == Sector ) {
156 Mutex_Release( &Cache->Lock );
160 // Check if we have found the oldest entry
161 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Sorted list: once past Sector, the insertion point is fixed
167 if(ent->Num > Sector)
171 // Create the new entry
// Entry is a header plus SectorSize bytes of trailing data
172 new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
175 new->LastAccess = now();
176 new->LastWrite = 0; // Zero is special, it means unmodified
177 memcpy(new->Data, Buffer, Cache->SectorSize);
179 // Have we reached the maximum cached entries?
180 if( Cache->CacheUsed == Cache->CacheSize )
// Eviction path: continue the scan over the rest of the list to find the
// true LRU entry. `savedPrev` preserves the insertion point, which the
// continued loop clobbers via `prev`.
182 tIOCache_Ent *savedPrev = prev;
// NOTE(review): oldestPrev is reset to the head here, but its update
// inside the loop below is not visible — verify it tracks oldest's
// predecessor correctly in the full file.
183 oldestPrev = (tIOCache_Ent*)&Cache->Entries;
184 // If so, search for the least recently accessed entry
185 for( ; ent; prev = ent, ent = ent->Next )
187 // Check if we have found the oldest entry
188 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Defensive: cache reported full but no entry was found to evict
194 Log_Error("IOCache", "Cache full, but also empty");
197 // Remove from list, write back and free
198 oldestPrev->Next = oldest->Next;
// Only dirty entries need writing back; virtual caches have no backing
// store to write to
199 if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
200 Cache->Write(Cache->ID, oldest->Num, oldest->Data);
203 // Decrement the used count
// (Insertion of `new` at the saved point happens in lines not visible here)
215 Mutex_Release( &Cache->Lock );
222 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
223 * \brief Write to a cached sector
// (Fixed doc: previous \brief said "Read from a cached sector" — a
// copy-paste from IOCache_Read.)
// Updates the cached copy of Sector if present, marking it dirty; in
// IOCACHE_WRITEBACK mode the data is also pushed immediately to the
// backing store via the Write callback.
225 int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
230 if(!Cache || !Buffer)
233 Mutex_Acquire( &Cache->Lock );
234 if(Cache->CacheSize == 0) {
235 Mutex_Release( &Cache->Lock );
// Scan the sorted entry list for the target sector
240 for( ent = Cache->Entries; ent; ent = ent->Next )
242 // Have we found what we are looking for?
243 if( ent->Num == Sector ) {
244 memcpy(ent->Data, Buffer, Cache->SectorSize);
245 ent->LastAccess = ent->LastWrite = now(); // mark dirty and touch LRU
// NOTE(review): despite the mode's name, WRITEBACK writes through to the
// device immediately here; deferred write-out only happens on
// eviction/flush for other paths — confirm intended semantics
247 if(Cache->Mode == IOCACHE_WRITEBACK) {
248 Cache->Write(Cache->ID, Sector, Buffer);
252 Mutex_Release( &Cache->Lock );
255 // It's a sorted list, so as soon as we go past `Sector` we know
257 if(ent->Num > Sector) break;
// Sector not cached
260 Mutex_Release( &Cache->Lock );
265 * \fn void IOCache_Flush( tIOCache *Cache )
266 * \brief Flush a cache
// Writes every cached entry out to the backing store. Virtual caches have
// no backing store, so flushing them is a no-op.
268 void IOCache_Flush( tIOCache *Cache )
272 if( Cache->Mode == IOCACHE_VIRTUAL ) return;
275 Mutex_Acquire( &Cache->Lock );
276 if(Cache->CacheSize == 0) {
277 Mutex_Release( &Cache->Lock );
// NOTE(review): all entries are written, not just dirty ones —
// LastWrite is not checked here, unlike the eviction path in IOCache_Add
282 for( ent = Cache->Entries; ent; ent = ent->Next )
284 Cache->Write(Cache->ID, ent->Num, ent->Data);
288 Mutex_Release( &Cache->Lock );
292 * \fn void IOCache_Destroy( tIOCache *Cache )
293 * \brief Destroy a cache
// Writes back all entries (unless the cache is virtual), frees every
// entry, then unlinks the cache from the global list.
// NOTE(review): the free() of the cache structure itself is not visible
// in this truncated view — confirm it happens after the unlink.
295 void IOCache_Destroy( tIOCache *Cache )
297 tIOCache_Ent *ent, *prev = NULL;
300 Mutex_Acquire( &Cache->Lock );
301 if(Cache->CacheSize == 0) {
302 Mutex_Release( &Cache->Lock );
// Walk and free: the comma-sequence in the increment saves the current
// node, advances, then frees the saved node — so no use-after-free, and
// free(prev) never sees the initial NULL on the first pass
307 for(ent = Cache->Entries;
309 prev = ent, ent = ent->Next, free(prev) )
311 if( Cache->Mode != IOCACHE_VIRTUAL )
313 Cache->Write(Cache->ID, ent->Num, ent->Data);
// Mark the cache empty so any racing user sees a dead cache
318 Cache->CacheSize = 0;
320 Mutex_Release( &Cache->Lock );
// Remove this cache from the global list under the spinlock
323 SHORTLOCK( &glIOCache_Caches );
// Head-as-entry trick again: relies on Next being the first member of
// tIOCache (confirm against struct definition)
326 tIOCache *prev_cache = (tIOCache*)&gIOCache_Caches;
327 for(cache = gIOCache_Caches;
329 prev_cache = cache, cache = cache->Next )
332 prev_cache->Next = cache->Next;
337 SHORTREL( &glIOCache_Caches );