int icacheprefetch = 1;

typedef struct ICache ICache;
typedef struct IHash IHash;
typedef struct ISum ISum;
/*
 * gcc 4.3 inlines the pushfirst loop in initicache,
 * but the inliner incorrectly deduces that
 * icache.free.next has a constant value
 * throughout the loop.  (In fact, pushfirst
 * assigns to it as ie->prev->next.)
 * Marking it volatile should avoid this bug.
 * The speed of linked list operations is dwarfed
 * by the disk i/o anyway.
 */
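/*
 * A minimal standalone sketch (not part of icache.c) of the sentinel-based
 * circular doubly-linked list used throughout this file.  Note how push
 * updates the sentinel's next field through e->prev->next -- exactly the
 * aliasing the gcc 4.3 inliner missed.  Names (Node, push, pop, demo) are
 * invented for illustration; the sketch compiles as standard C.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct Node Node;
struct Node {
	Node	*prev;
	Node	*next;
	int	val;
};

static void
push(Node *list, Node *e)
{
	e->prev = list;
	e->next = list->next;
	e->next->prev = e;	/* old first element points back at e */
	e->prev->next = e;	/* this store writes list->next */
}

static Node*
pop(Node *list)
{
	Node *e;

	if(list->prev == list)	/* empty: sentinel points at itself */
		return NULL;
	e = list->prev;		/* last element, i.e. least recently pushed */
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->prev = e->next = NULL;
	return e;
}

static void
demo(void)
{
	Node list, a, b;

	list.prev = list.next = &list;	/* empty list is just the sentinel */
	a.val = 1;
	b.val = 2;
	push(&list, &a);
	push(&list, &b);
	printf("%d\n", pop(&list)->val);	/* prints 1: a is the last element */
}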
/*
 * Hash table of IEntries
 */

	ih = vtmallocz(sizeof(IHash)+size*sizeof(ih->table[0]));
	ih->table = (IEntry**)(ih+1);
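/*
 * mkihash above allocates the IHash header and its table with a single
 * vtmallocz and aims ih->table just past the struct.  A standalone sketch
 * of the same one-allocation idiom in standard C; the names Hash and
 * mkhash are invented for illustration:
 */
#include <stdlib.h>

typedef struct Hash Hash;
struct Hash {
	int	nslot;
	void	**table;	/* points into the same allocation as the header */
};

static Hash*
mkhash(int nslot)
{
	Hash *h;

	/* one calloc covers header and slot array; one free releases both */
	h = calloc(1, sizeof(Hash) + nslot*sizeof(h->table[0]));
	if(h == NULL)
		return NULL;
	h->nslot = nslot;
	h->table = (void**)(h+1);	/* slot array starts right after the struct */
	return h;
}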
ihashlookup(IHash *ih, u8int score[VtScoreSize], int type)
	h = hashbits(score, ih->bits);
	for(ie=ih->table[h]; ie; ie=ie->nexthash)
		if((type == -1 || type == ie->ia.type) && scorecmp(score, ie->score) == 0)

ihashdelete(IHash *ih, IEntry *ie, char *what)
	h = hashbits(ie->score, ih->bits);
	for(l=&ih->table[h]; *l; l=&(*l)->nexthash)

	fprint(2, "warning: %s %V not found in ihashdelete\n", what, ie->score);

ihashinsert(IHash *ih, IEntry *ie)
	h = hashbits(ie->score, ih->bits);
	ie->nexthash = ih->table[h];
	if(ie->prev == nil && ie->next == nil)
		return ie;	/* not on a list */
	ie->prev->next = ie->next;
	ie->next->prev = ie->prev;

poplast(volatile IEntry *list)
	if(list->prev == list)
		return nil;	/* empty: sentinel points at itself */
	return popout(list->prev);

pushfirst(volatile IEntry *list, IEntry *ie)
	ie->prev = (IEntry*)list;
	ie->next = list->next;
	ie->next->prev = ie;
	ie->prev->next = ie;	/* the store the comment above warns about */
/*
 * Arena summary cache.
 */

scachelookup(u64int addr)
	for(i=0; i<icache.nsum; i++){
		if(s->addr <= addr && addr < s->limit){
			memmove(icache.sum+1, icache.sum, i*sizeof icache.sum[0]);

	for(i=0; i<s->nentries; i++)
		ihashdelete(icache.shash, &s->entries[i], "scache");

	for(i=icache.nsum-1; i>=0; i--){
		if(canqlock(&s->lock)){
			memmove(icache.sum+1, icache.sum, i*sizeof icache.sum[0]);
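/*
 * Both memmoves above implement move-to-front over the small icache.sum
 * array: slide slots [0..i) up by one, then install the found entry (or
 * the eviction victim) at slot 0, so hot summaries stay near the front
 * of the linear scan.  A minimal sketch of the idiom with an int array
 * standing in for the ISum* table (names are invented):
 */
#include <stdio.h>
#include <string.h>

static void
movetofront(int *a, int i)
{
	int v;

	v = a[i];
	memmove(a+1, a, i*sizeof a[0]);	/* shift a[0..i) to a[1..i+1) */
	a[0] = v;
}

static void
mtfdemo(void)
{
	int a[4] = {10, 20, 30, 40};

	movetofront(a, 2);
	printf("%d %d %d %d\n", a[0], a[1], a[2], a[3]);	/* 30 10 20 40 */
}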
scachehit(u64int addr)
	scachelookup(addr);	/* for move-to-front */

scachesetup(ISum *s, u64int addr)
	s->arena = amapitoag(mainindex, addr, &addr0, &limit, &g);

	n = asumload(s->arena, s->g, s->entries, ArenaCIGSize);
	/*
	 * n can be less than ArenaCIGSize, either if the clump group
	 * is the last in the arena and is only partially filled, or if there
	 * are corrupt clumps in the group -- those are not returned.
	 */
	s->entries[i].ia.addr += s->addr;
	ihashinsert(icache.shash, &s->entries[i]);

	//fprint(2, "%T scacheload %s %d - %d entries\n", s->arena->name, s->g, n);
	addstat(StatScachePrefetch, n);

scachemiss(u64int addr)
	s = scachelookup(addr);

	/* first time: make an entry in the cache but don't populate it yet */
	scachesetup(s, addr);

	/* second time: load from disk */
	if(s->loaded || !icacheprefetch){

	return s;	/* locked */
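/*
 * scachemiss above is a two-touch prefetch heuristic: the first miss on
 * an arena clump group only records a placeholder, and only a second
 * miss on the same group pays for the asumload disk read.  A standalone
 * sketch of that control flow with an invented Slot type; the caller is
 * expected to perform the expensive load when a non-NULL slot comes back:
 */
#include <stddef.h>

typedef struct Slot Slot;
struct Slot {
	unsigned long long addr;
	int	seen;	/* placeholder created on first miss */
	int	loaded;	/* populated from disk on second miss */
};

static Slot*
twotouchmiss(Slot *s, unsigned long long addr)
{
	if(!s->seen){
		/* first time: remember the group but don't touch the disk */
		s->seen = 1;
		s->addr = addr;
		return NULL;
	}
	if(s->loaded)
		return NULL;	/* already populated; nothing to do */
	return s;	/* second time: caller loads it from disk */
}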
initicache(u32int mem0)
	int i, entries, scache;

	icache.full.l = &icache.lock;

	entries = mem / (sizeof(IEntry)+sizeof(IEntry*));
	scache = (entries/8) / ArenaCIGSize;
	entries -= entries/8;

	fprint(2, "icache %,d bytes = %,d entries; %d scache\n", mem0, entries, scache);
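/*
 * A worked example of the sizing above, with illustrative numbers (the
 * real sizeof(IEntry) and ArenaCIGSize depend on the build): assume
 * sizeof(IEntry) == 40, 8-byte pointers, ArenaCIGSize == 256, and
 * mem == 16MB == 16777216 bytes.  Then
 *
 *	entries = 16777216 / (40+8) = 349525
 *	scache  = (349525/8) / 256  = 170
 *	entries -= 349525/8         = 305835
 *
 * so an eighth of the budget is diverted to arena summaries and the
 * rest pays for the IEntries plus their hash-table slots.
 */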
	icache.clean.prev = icache.clean.next = &icache.clean;
	icache.dirty.prev = icache.dirty.next = &icache.dirty;
	icache.free.prev = icache.free.next = (IEntry*)&icache.free;

	icache.hash = mkihash(entries);
	icache.nentries = entries;
	setstat(StatIcacheSize, entries);
	icache.entries = vtmallocz(entries*sizeof icache.entries[0]);
	icache.maxdirty = entries / 2;
	for(i=0; i<entries; i++)
		pushfirst(&icache.free, &icache.entries[i]);

	icache.nsum = scache;
	icache.sum = vtmallocz(scache*sizeof icache.sum[0]);
	icache.sum[0] = vtmallocz(scache*sizeof icache.sum[0][0]);
	icache.nsentries = scache * ArenaCIGSize;
	icache.sentries = vtmallocz(scache*ArenaCIGSize*sizeof icache.sentries[0]);
	icache.shash = mkihash(scache*ArenaCIGSize);
	for(i=0; i<scache; i++){
		icache.sum[i] = icache.sum[0] + i;
		icache.sum[i]->entries = icache.sentries + i*ArenaCIGSize;
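/*
 * The loop above carves two large allocations into per-summary pieces:
 * icache.sum[0] holds every ISum struct back to back, icache.sum[i]
 * points at the i'th of them, and each summary's entries field slices
 * the shared icache.sentries pool in ArenaCIGSize-sized chunks.  A
 * standalone sketch of the same carving (Sum, mksums, and perchunk are
 * invented names; error checks elided for brevity):
 */
#include <stdlib.h>

typedef struct Sum Sum;
struct Sum {
	int	*entries;	/* points into a pool shared by all summaries */
};

static Sum**
mksums(int nsum, int perchunk)
{
	Sum **sum;
	int *pool, i;

	sum = calloc(nsum, sizeof sum[0]);		/* table of pointers */
	sum[0] = calloc(nsum, sizeof sum[0][0]);	/* all structs at once */
	pool = calloc((size_t)nsum*perchunk, sizeof pool[0]);
	for(i = 0; i < nsum; i++){
		sum[i] = sum[0] + i;
		sum[i]->entries = pool + i*perchunk;
	}
	return sum;
}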
	ie = poplast(&icache.clean);

	ihashdelete(icache.hash, ie, "evictlru");

icacheinsert(u8int score[VtScoreSize], IAddr *ia, int state)
	if((ie = poplast(&icache.free)) == nil && (ie = evictlru()) == nil){
		addstat(StatIcacheStall, 1);
		while((ie = poplast(&icache.free)) == nil && (ie = evictlru()) == nil){
			// Could safely return here if state == IEClean.
			// But if state == IEDirty, have to wait to make
			// sure we don't lose an index write.
			// Let's wait all the time.
			rsleep(&icache.full);
		}
		addstat(StatIcacheStall, -1);
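/*
 * The stall above is the classic condition-variable pattern: holding the
 * cache lock, retry the allocation and rsleep until icacheclean's
 * rwakeupall announces freed entries.  The same shape in portable C with
 * pthreads (Free, getentry, and putentry are invented names; the Plan 9
 * Rendez becomes a pthread_cond_t):
 */
#include <pthread.h>
#include <stddef.h>

typedef struct Free Free;
struct Free {
	Free *next;
};

static pthread_mutex_t cachelock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cachefull = PTHREAD_COND_INITIALIZER;
static Free *freelist;

static Free*
getentry(void)
{
	Free *e;

	pthread_mutex_lock(&cachelock);
	while((e = freelist) == NULL){
		/* drops cachelock while blocked, reacquires before returning */
		pthread_cond_wait(&cachefull, &cachelock);
	}
	freelist = e->next;
	pthread_mutex_unlock(&cachelock);
	return e;
}

static void
putentry(Free *e)
{
	pthread_mutex_lock(&cachelock);
	e->next = freelist;
	freelist = e;
	pthread_cond_broadcast(&cachefull);	/* the rwakeupall analogue */
	pthread_mutex_unlock(&cachelock);
}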
	memmove(ie->score, score, VtScoreSize);

	if(state == IEClean){
		addstat(StatIcachePrefetch, 1);
		pushfirst(&icache.clean, ie);
	}else{
		addstat(StatIcacheWrite, 1);
		assert(state == IEDirty);

		setstat(StatIcacheDirty, icache.ndirty);

		pushfirst(&icache.dirty, ie);

	ihashinsert(icache.hash, ie);

icachelookup(u8int score[VtScoreSize], int type, IAddr *ia)
	addstat(StatIcacheLookup, 1);
	if((ie = ihashlookup(icache.hash, score, type)) != nil){

		if(ie->state == IEClean)
			pushfirst(&icache.clean, ie);
		addstat(StatIcacheHit, 1);
		qunlock(&icache.lock);

	if((ie = ihashlookup(icache.shash, score, type)) != nil){
		icacheinsert(score, &ie->ia, IEClean);
		scachehit(ie->ia.addr);
		addstat(StatScacheHit, 1);
		qunlock(&icache.lock);

	addstat(StatIcacheMiss, 1);
	qunlock(&icache.lock);

insertscore(u8int score[VtScoreSize], IAddr *ia, int state, AState *as)
	icacheinsert(score, ia, state);

	toload = scachemiss(ia->addr);

	assert(state == IEDirty);

	fprint(2, "%T insertscore IEDirty without as; called from %lux\n", getcallerpc(&score));

	if(icache.as.aa > as->aa)
		fprint(2, "%T insertscore: aa moving backward: %#llux -> %#llux\n", icache.as.aa, as->aa);

	qunlock(&icache.lock);

	qunlock(&toload->lock);

	if(icache.ndirty >= icache.maxdirty)

	/*
	 * It's okay not to do this under icache.lock.
	 * Calling insertscore only happens when we hold
	 * the lump, meaning any searches for this block
	 * will hit in the lump cache until after we return.
	 */
	markbloomfilter(mainindex->bloom, score);
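/*
 * For orientation, a generic sketch of what marking a bloom filter
 * involves (this is an illustration, not venti's actual bloom layout):
 * derive a few bit positions from the score and set them, so a later
 * lookup that finds any of those bits clear can skip the index entirely.
 * bloommark, NHASH, and the 20-byte score slicing are assumptions here.
 */
enum { NHASH = 4 };

static void
bloommark(unsigned char *bits, unsigned long nbits, unsigned char score[20])
{
	unsigned long h;
	int i;

	for(i = 0; i < NHASH; i++){
		/* scores are already uniform hashes; 4-byte slices serve as hash values */
		h = (unsigned long)score[4*i]<<24 | score[4*i+1]<<16
			| score[4*i+2]<<8 | score[4*i+3];
		h %= nbits;
		bits[h>>3] |= 1 << (h&7);	/* set bit h of the filter */
	}
}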
lookupscore(u8int score[VtScoreSize], int type, IAddr *ia)
	if(icachelookup(score, type, ia) >= 0){
		addstat(StatIcacheRead, 1);

	addstat(StatIcacheFill, 1);
	if(loadientry(mainindex, score, type, &d) < 0)

	insertscore(score, &d.ia, IEClean, nil);

	addstat2(StatIcacheRead, 1, StatIcacheReadTime, msec() - ms);

hashbits(u8int *sc, int bits)
	v = (sc[0] << 24) | (sc[1] << 16) | (sc[2] << 8) | sc[3];

icachedirtyfrac(void)
	return (vlong)icache.ndirty*IcacheFrac / icache.nentries;
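/*
 * Worked example of the fixed-point fraction above: if IcacheFrac were
 * 1000 (an illustrative value) with ndirty == 250 and nentries == 1000,
 * the call returns 250*1000/1000 = 250, i.e. a quarter of full scale,
 * with no floating point.  The vlong cast keeps the multiplication from
 * overflowing a 32-bit int before the division.
 */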
/*
 * Return a singly-linked list of dirty index entries
 * with 32-bit hash numbers between lo and hi
 * and address <= limit.
 */
icachedirty(u32int lo, u32int hi, u64int limit)
	trace(TraceProc, "icachedirty enter");

	for(ie = icache.dirty.next; ie != &icache.dirty; ie=ie->next){
		if(ie->state == IEDirty && ie->ia.addr <= limit){
			h = hashbits(ie->score, 32);
			if(lo <= h && h <= hi){
				ie->nextdirty = dirty;

	qunlock(&icache.lock);
	trace(TraceProc, "icachedirty exit");

	qunlock(&icache.lock);

/*
 * The singly-linked non-circular list of index entries ie
 * has been written to disk.  Move them to the clean list.
 */
icacheclean(IEntry *ie)
	trace(TraceProc, "icacheclean enter");

	assert(ie->state == IEDirty);
	next = ie->nextdirty;

	popout(ie);	/* from icache.dirty */

	pushfirst(&icache.clean, ie);

	setstat(StatIcacheDirty, icache.ndirty);
	rwakeupall(&icache.full);
	qunlock(&icache.lock);
	trace(TraceProc, "icacheclean exit");

	while((ie = evictlru()) != nil)
		pushfirst(&icache.free, ie);
	for(i=0; i<icache.nsum; i++){

	qunlock(&icache.lock);