5 * By thePowersGang (John Hodge)
7 * TODO: Convert to use spare physical pages instead
// Forward typedefs: one cached-sector entry, and per-physical-page bookkeeping.
14 typedef struct sIOCache_Ent tIOCache_Ent;
15 typedef struct sIOCache_PageInfo tIOCache_PageInfo;
// Per-page info record; linked into both a global page list and a per-cache list.
// NOTE(review): struct body is partially elided in this view — remaining fields not shown.
27 struct sIOCache_PageInfo
29 tIOCache_PageInfo *GlobalNext;	// Next page in the global page list
30 tIOCache_PageInfo *CacheNext;	// Next page owned by the same cache
// Fields of struct sIOCache (struct header elided in this view):
43 tIOCache_WriteCallback Write;	// Callback used to write a sector back to the device
46 tIOCache_Ent *Entries;	// Sorted (ascending sector number) singly-linked list of cached sectors
// Global cache registry — glIOCache_Caches guards the list headed by gIOCache_Caches.
50 tShortSpinlock glIOCache_Caches;
51 tIOCache *gIOCache_Caches = NULL;
52 int giIOCache_NumCaches = 0;	// Number of caches currently registered
53 tIOCache_PageInfo *gIOCache_GlobalPages;	// Head of the global page-info list
57 * \fn tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
58 * \brief Creates a new IO Cache
60 tIOCache *IOCache_Create( tIOCache_WriteCallback Write, Uint32 ID, int SectorSize, int CacheSize )
// Allocate zeroed so all list pointers/counters start NULL/0.
// NOTE(review): the calloc-failure check and the assignments of Write/ID are on
// lines elided from this view — confirm ret is NULL-checked before use.
62 tIOCache *ret = calloc( 1, sizeof(tIOCache) );
68 ret->SectorSize = SectorSize;
69 ret->Mode = IOCACHE_WRITEBACK;	// Default policy: write-through to device on IOCache_Write
72 ret->CacheSize = CacheSize;	// Maximum number of cached sectors
// Prepend the new cache to the global cache list (spinlock guards the list head).
75 SHORTLOCK( &glIOCache_Caches );
76 ret->Next = gIOCache_Caches;
77 gIOCache_Caches = ret;
78 SHORTREL( &glIOCache_Caches );
85 * \fn int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
86 * \brief Read from a cached sector
// Returns nonzero on a cache hit (Buffer filled with SectorSize bytes),
// and a miss indication otherwise — exact return values are on elided lines.
88 int IOCache_Read( tIOCache *Cache, Uint64 Sector, void *Buffer )
92 ENTER("pCache XSector pBuffer", Cache, Sector, Buffer);
// Sanity check the pointer arguments before touching them.
95 if(!Cache || !Buffer) {
// A zero CacheSize means caching is disabled for this cache — always a miss.
101 Mutex_Acquire( &Cache->Lock );
102 if(Cache->CacheSize == 0) {
103 Mutex_Release( &Cache->Lock );
// Linear scan of the sorted entry list.
109 for( ent = Cache->Entries; ent; ent = ent->Next )
111 // Have we found what we are looking for?
112 if( ent->Num == Sector ) {
113 memcpy(Buffer, ent->Data, Cache->SectorSize);
114 ent->LastAccess = now();	// Touch for LRU eviction in IOCache_Add
115 Mutex_Release( &Cache->Lock );
119 // It's a sorted list, so as soon as we go past `Sector` we know
// it is not cached and can stop early.
121 if(ent->Num > Sector) break;
// Fell off the list or passed the sector — miss.
124 Mutex_Release( &Cache->Lock );
130 * \fn int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
131 * \brief Cache a sector
// Inserts Buffer's contents into the sorted entry list; if the cache is full,
// evicts the least-recently-accessed entry (writing it back first if dirty).
133 int IOCache_Add( tIOCache *Cache, Uint64 Sector, void *Buffer )
135 tIOCache_Ent *ent, *prev;
137 tIOCache_Ent *oldest = NULL, *oldestPrev;
140 if(!Cache || !Buffer)
// Caching disabled (CacheSize == 0) — nothing to add.
144 Mutex_Acquire( &Cache->Lock );
145 if(Cache->CacheSize == 0) {
146 Mutex_Release( &Cache->Lock );
// `prev` starts as a pointer to the list head disguised as an entry, so the
// head-insertion case needs no special-casing (relies on Next being first).
151 prev = (tIOCache_Ent*)&Cache->Entries;
152 for( ent = Cache->Entries; ent; prev = ent, ent = ent->Next )
154 // Is it already here?
155 if( ent->Num == Sector ) {
156 Mutex_Release( &Cache->Lock );
160 // Check if we have found the oldest entry
// (tracked during the insertion scan so a full cache may not need a second pass)
161 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
// Sorted list: first entry past Sector is the insertion point.
167 if(ent->Num > Sector)
171 // Create the new entry
// NOTE(review): the malloc-failure check is on elided lines — confirm it exists.
172 new = malloc( sizeof(tIOCache_Ent) + Cache->SectorSize );
175 new->LastAccess = now();
176 new->LastWrite = 0; // Zero is special, it means unmodified
177 memcpy(new->Data, Buffer, Cache->SectorSize);
179 // Have we reached the maximum cached entries?
180 if( Cache->CacheUsed == Cache->CacheSize )
// Remember where the insertion scan stopped, then continue scanning the tail
// for the LRU victim. NOTE(review): oldestPrev appears to be reset to the list
// head here while `oldest` may already point mid-list from the first scan; the
// lines that update oldestPrev inside the loop are elided — verify the unlink
// below always uses a prev that actually precedes `oldest`.
182 tIOCache_Ent *savedPrev = prev;
183 oldestPrev = (tIOCache_Ent*)&Cache->Entries;
184 // If so, search for the least recently accessed entry
185 for( ; ent; prev = ent, ent = ent->Next )
187 // Check if we have found the oldest entry
188 if( !oldest || oldest->LastAccess > ent->LastAccess ) {
193 // Remove from list, write back and free
194 oldestPrev->Next = oldest->Next;
// Dirty (LastWrite != 0) entries are flushed to the device unless the cache
// is purely virtual (no backing store).
195 if(oldest->LastWrite && Cache->Mode != IOCACHE_VIRTUAL)
196 Cache->Write(Cache->ID, oldest->Num, oldest->Data);
199 // Decrement the used count
211 Mutex_Release( &Cache->Lock );
218 * \fn int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
219 * \brief Write to a cached sector (updates the cached copy; write-back caches
//        also push the data to the device immediately)
221 int IOCache_Write( tIOCache *Cache, Uint64 Sector, void *Buffer )
226 if(!Cache || !Buffer)
// Caching disabled — nothing cached to update.
229 Mutex_Acquire( &Cache->Lock );
230 if(Cache->CacheSize == 0) {
231 Mutex_Release( &Cache->Lock );
// Scan the sorted list for the target sector.
236 for( ent = Cache->Entries; ent; ent = ent->Next )
238 // Have we found what we are looking for?
239 if( ent->Num == Sector ) {
240 memcpy(ent->Data, Buffer, Cache->SectorSize);
// Mark dirty: nonzero LastWrite flags the entry for flush/eviction write-back.
241 ent->LastAccess = ent->LastWrite = now();
243 if(Cache->Mode == IOCACHE_WRITEBACK) {
244 Cache->Write(Cache->ID, Sector, Buffer);
248 Mutex_Release( &Cache->Lock );
251 // It's a sorted list, so as soon as we go past `Sector` we know
// the sector is not cached and can stop early.
253 if(ent->Num > Sector) break;
// Not cached — caller must write to the device itself.
256 Mutex_Release( &Cache->Lock );
261 * \fn void IOCache_Flush( tIOCache *Cache )
262 * \brief Flush a cache
// Writes every cached entry back to the device (dirty or not).
// NOTE(review): no NULL-check on Cache is visible in this view before the
// dereference below — confirm one exists on an elided line or at the callers.
264 void IOCache_Flush( tIOCache *Cache )
// Virtual caches have no backing device, so there is nothing to flush to.
268 if( Cache->Mode == IOCACHE_VIRTUAL ) return;
271 Mutex_Acquire( &Cache->Lock );
272 if(Cache->CacheSize == 0) {
273 Mutex_Release( &Cache->Lock );
// Write each entry out in list (sector) order.
278 for( ent = Cache->Entries; ent; ent = ent->Next )
280 Cache->Write(Cache->ID, ent->Num, ent->Data);
284 Mutex_Release( &Cache->Lock );
288 * \fn void IOCache_Destroy( tIOCache *Cache )
289 * \brief Destroy a cache
// Writes back all entries (unless virtual), frees them, and unlinks the cache
// from the global cache list.
291 void IOCache_Destroy( tIOCache *Cache )
293 tIOCache_Ent *ent, *prev = NULL;
296 Mutex_Acquire( &Cache->Lock );
297 if(Cache->CacheSize == 0) {
298 Mutex_Release( &Cache->Lock );
// Free each entry AFTER advancing: the increment clause saves the current
// node in `prev`, steps `ent`, then frees `prev` — so the walk never touches
// freed memory.
303 for(ent = Cache->Entries;
305 prev = ent, ent = ent->Next, free(prev) )
307 if( Cache->Mode != IOCACHE_VIRTUAL )
309 Cache->Write(Cache->ID, ent->Num, ent->Data);
// Mark the cache as disabled before releasing the lock, so concurrent users
// hitting the CacheSize==0 guard bail out cleanly.
314 Cache->CacheSize = 0;
316 Mutex_Release( &Cache->Lock );
// Unlink from the global cache list under the registry spinlock.
319 SHORTLOCK( &glIOCache_Caches );
// Head-pointer-as-fake-entry trick again: lets the head removal share the
// same unlink code as mid-list removal (relies on Next being first member).
322 tIOCache *prev_cache = (tIOCache*)&gIOCache_Caches;
323 for(cache = gIOCache_Caches;
325 prev_cache = cache, cache = cache->Next )
328 prev_cache->Next = cache->Next;
333 SHORTREL( &glIOCache_Caches );