 * By thePowersGang (John Hodge)
 *
 * TODO: Convert to use spare physical pages instead
14 typedef struct sIOCache_Ent tIOCache_Ent;
15 typedef struct sIOCache_PageInfo tIOCache_PageInfo;
27 struct sIOCache_PageInfo
29 tIOCache_PageInfo *GlobalNext;
30 tIOCache_PageInfo *CacheNext;
43 tIOCache_WriteCallback Write;
46 tIOCache_Ent *Entries;
50 tShortSpinlock glIOCache_Caches;
51 tIOCache *gIOCache_Caches = NULL;
52 int giIOCache_NumCaches = 0;
53 tIOCache_PageInfo *gIOCache_GlobalPages;
57 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
58 * \brief Creates a new IO Cache
60 tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
62 tIOCache *ret = calloc( 1, sizeof(tIOCache) );
68 ret->SectorSize = SectorSize;
69 ret->Mode = IOCACHE_WRITEBACK;
72 ret->CacheSize = CacheSize;
75 SHORTLOCK( &glIOCache_Caches );
76 ret->Next = gIOCache_Caches;
77 gIOCache_Caches = ret;
78 SHORTREL( &glIOCache_Caches );
85 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
86 * \brief Read from a cached sector
88 int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
91 ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
94 if(!Cache || !Buffer) {
100 Mutex_Acquire( &Cache->Lock );
101 if(Cache->CacheSize == 0) {
102 Mutex_Release( &Cache->Lock );
107 #if IOCACHE_USE_PAGES
108 tIOCache_PageInfo *page;
109 size_t offset = (Sector*Cache->SectorSize) % PAGE_SIZE;
110 Uint64 wanted_base = (Sector*Cache->SectorSize) & ~(PAGE_SIZE-1);
111 for( page = Cache->Pages; page; page = page->CacheNext )
114 if(page->BaseOffset < WantedBase) continue;
115 if(page->BaseOffset > WantedBase) break;
116 tmp = MM_MapTemp( page->BasePhys );
117 memcpy( Buffer, tmp + offset, Cache->SectorSize );
123 for( ent = Cache->Entries; ent; ent = ent->Next )
125 // Have we found what we are looking for?
126 if( ent->Num == Sector ) {
127 memcpy(Buffer, ent->Data, Cache->SectorSize);
128 ent->LastAccess = now();
129 Mutex_Release( &Cache->Lock );
133 // It's a sorted list, so as soon as we go past `Sector` we know
135 if(ent->Num > Sector) break;
139 Mutex_Release( &Cache->Lock );
145 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
146 * \brief Cache a sector
148 int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
150 tIOCache_Ent *ent, *prev;
152 tIOCache_Ent *oldest = NULL, *oldestPrev;
155 if(!Cache || !Buffer)
159 Mutex_Acquire( &Cache->Lock );
160 if(Cache->CacheSize == 0) {
161 Mutex_Release( &Cache->Lock );
166 prev = (tIOCache_Ent*)&Cache->Entries;
167 for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
169 // Is it already here?
170 if( ent->Num == Sector ) {
171 Mutex_Release( &Cache->Lock );
175 // Check if we have found the oldest entry
176 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
182 if(ent->Num > Sector)
186 // Create the new entry
187 new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
190 new->LastAccess = now();
191 new->LastWrite = 0; // Zero is special, it means unmodified
192 memcpy(new->Data, Buffer, Cache->SectorSize);
194 // Have we reached the maximum cached entries?
195 if( Cache->CacheUsed == Cache->CacheSize )
197 tIOCache_Ent *savedPrev = prev;
198 oldestPrev = (tIOCache_Ent*)&Cache->Entries;
199 // If so, search for the least recently accessed entry
200 for( ; ent; prev = ent, ent = ent->Next )
202 // Check if we have found the oldest entry
203 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
209 Log_Error("IOCache", "Cache full, but also empty");
212 // Remove from list, write back and free
213 oldestPrev->Next = oldest->Next;
214 if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
215 Cache->Write(Cache->ID, oldest->Num, oldest->Data);
218 // Decrement the used count
230 Mutex_Release( &Cache->Lock );
237 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
238 * \brief Read from a cached sector
240 int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
245 if(!Cache || !Buffer)
248 Mutex_Acquire( &Cache->Lock );
249 if(Cache->CacheSize == 0) {
250 Mutex_Release( &Cache->Lock );
255 for( ent = Cache->Entries; ent; ent = ent->Next )
257 // Have we found what we are looking for?
258 if( ent->Num == Sector ) {
259 memcpy(ent->Data, Buffer, Cache->SectorSize);
260 ent->LastAccess = ent->LastWrite = now();
262 if(Cache->Mode == IOCACHE_WRITEBACK) {
263 Cache->Write(Cache->ID, Sector, Buffer);
267 Mutex_Release( &Cache->Lock );
270 // It's a sorted list, so as soon as we go past `Sector` we know
272 if(ent->Num > Sector) break;
275 Mutex_Release( &Cache->Lock );
280 * \fn void IOCache_Flush( tIOCache *Cache )
281 * \brief Flush a cache
283 void IOCache_Flush( tIOCache *Cache )
287 if( Cache->Mode == IOCACHE_VIRTUAL ) return;
290 Mutex_Acquire( &Cache->Lock );
291 if(Cache->CacheSize == 0) {
292 Mutex_Release( &Cache->Lock );
297 for( ent = Cache->Entries; ent; ent = ent->Next )
299 Cache->Write(Cache->ID, ent->Num, ent->Data);
303 Mutex_Release( &Cache->Lock );
307 * \fn void IOCache_Destroy( tIOCache *Cache )
308 * \brief Destroy a cache
310 void IOCache_Destroy( tIOCache *Cache )
312 tIOCache_Ent *ent, *prev = NULL;
315 Mutex_Acquire( &Cache->Lock );
316 if(Cache->CacheSize == 0) {
317 Mutex_Release( &Cache->Lock );
322 for(ent = Cache->Entries;
324 prev = ent, ent = ent->Next, free(prev) )
326 if( Cache->Mode != IOCACHE_VIRTUAL )
328 Cache->Write(Cache->ID, ent->Num, ent->Data);
333 Cache->CacheSize = 0;
335 Mutex_Release( &Cache->Lock );
338 SHORTLOCK( &glIOCache_Caches );
341 tIOCache *prev_cache = (tIOCache*)&gIOCache_Caches;
342 for(cache = gIOCache_Caches;
344 prev_cache = cache, cache = cache->Next )
347 prev_cache->Next = cache->Next;
352 SHORTREL( &glIOCache_Caches );