Blob


/*
 * Memory-only VtBlock cache.
 *
 * The cached Venti blocks are in the hash chains.
 * The cached local blocks are only in the blocks array.
 * The free blocks are in the heap, which is supposed to
 * be indexed by second-to-last use but actually
 * appears to be last use.
 */
11 #include <u.h>
12 #include <libc.h>
13 #include <venti.h>
15 int nread, ncopy, nwrite;
/*
 * I/O states for a cached block (VtBlock.iostate).
 */
enum {
	BioLocal = 1,	/* block lives only in this cache, addressed by array index */
	BioVenti,	/* block holds data fetched from the venti server */
	BioReading,	/* not used in this file */
	BioWriting,	/* not used in this file */
	BioEmpty,	/* freshly bumped; contents undefined */
	BioVentiError,	/* venti read failed; the error itself is cached */
};
enum {
	BadHeap = ~0,	/* heap index meaning "not in the heap" */
};
28 struct VtCache
29 {
30 QLock lk;
31 VtConn *z;
32 u32int blocksize;
33 u32int now; /* ticks for usage time stamps */
34 VtBlock **hash; /* hash table for finding addresses */
35 int nhash;
36 VtBlock **heap; /* heap for finding victims */
37 int nheap;
38 VtBlock *block; /* all allocated blocks */
39 int nblock;
40 uchar *mem; /* memory for all blocks and data */
41 int mode;
42 int (*write)(VtConn*, uchar[VtScoreSize], uint, uchar*, int);
43 };
45 static void cachecheck(VtCache*);
47 VtCache*
48 vtcachealloc(VtConn *z, int blocksize, ulong nblock, int mode)
49 {
50 uchar *p;
51 VtCache *c;
52 int i;
53 VtBlock *b;
55 c = vtmallocz(sizeof(VtCache));
57 c->z = z;
58 c->blocksize = (blocksize + 127) & ~127;
59 c->nblock = nblock;
60 c->nhash = nblock;
61 c->hash = vtmallocz(nblock*sizeof(VtBlock*));
62 c->heap = vtmallocz(nblock*sizeof(VtBlock*));
63 c->block = vtmallocz(nblock*sizeof(VtBlock));
64 c->mem = vtmallocz(nblock*c->blocksize);
65 c->mode = mode;
66 c->write = vtwrite;
68 p = c->mem;
69 for(i=0; i<nblock; i++){
70 b = &c->block[i];
71 b->addr = NilBlock;
72 b->c = c;
73 b->data = p;
74 b->heap = i;
75 c->heap[i] = b;
76 p += c->blocksize;
77 }
78 c->nheap = nblock;
79 cachecheck(c);
80 return c;
81 }
83 /*
84 * BUG This is here so that vbackup can override it and do some
85 * pipelining of writes. Arguably vtwrite or vtwritepacket or the
86 * cache itself should be providing this functionality.
87 */
88 void
89 vtcachesetwrite(VtCache *c, int (*write)(VtConn*, uchar[VtScoreSize], uint, uchar*, int))
90 {
91 if(write == nil)
92 write = vtwrite;
93 c->write = write;
94 }
96 void
97 vtcachefree(VtCache *c)
98 {
99 int i;
101 qlock(&c->lk);
103 cachecheck(c);
104 for(i=0; i<c->nblock; i++)
105 assert(c->block[i].ref == 0);
107 vtfree(c->hash);
108 vtfree(c->heap);
109 vtfree(c->block);
110 vtfree(c->mem);
111 vtfree(c);
114 static void
115 vtcachedump(VtCache *c)
117 int i;
118 VtBlock *b;
120 for(i=0; i<c->nblock; i++){
121 b = &c->block[i];
122 print("cache block %d: type %d score %V iostate %d addr %d ref %d nlock %d\n",
123 i, b->type, b->score, b->iostate, b->addr, b->ref, b->nlock);
127 static void
128 cachecheck(VtCache *c)
130 u32int size, now;
131 int i, k, refed;
132 VtBlock *b;
134 size = c->blocksize;
135 now = c->now;
137 for(i = 0; i < c->nheap; i++){
138 if(c->heap[i]->heap != i)
139 sysfatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
140 if(i > 0 && c->heap[(i - 1) >> 1]->used - now > c->heap[i]->used - now)
141 sysfatal("bad heap ordering");
142 k = (i << 1) + 1;
143 if(k < c->nheap && c->heap[i]->used - now > c->heap[k]->used - now)
144 sysfatal("bad heap ordering");
145 k++;
146 if(k < c->nheap && c->heap[i]->used - now > c->heap[k]->used - now)
147 sysfatal("bad heap ordering");
150 refed = 0;
151 for(i = 0; i < c->nblock; i++){
152 b = &c->block[i];
153 if(b->data != &c->mem[i * size])
154 sysfatal("mis-blocked at %d", i);
155 if(b->ref && b->heap == BadHeap)
156 refed++;
157 else if(b->addr != NilBlock)
158 refed++;
160 if(c->nheap + refed != c->nblock){
161 fprint(2, "cachecheck: nheap %d refed %d nblocks %d\n", c->nheap, refed, c->nblock);
162 //vtcachedump(c);
164 assert(c->nheap + refed == c->nblock);
165 refed = 0;
166 for(i = 0; i < c->nblock; i++){
167 b = &c->block[i];
168 if(b->ref){
169 if(1)fprint(2, "a=%ud %V ref=%d\n", b->addr, b->score, b->ref);
170 refed++;
173 if(refed > 0)fprint(2, "cachecheck: in used %d\n", refed);
176 static int
177 upheap(int i, VtBlock *b)
179 VtBlock *bb;
180 u32int now;
181 int p;
182 VtCache *c;
184 c = b->c;
185 now = c->now;
186 for(; i != 0; i = p){
187 p = (i - 1) >> 1;
188 bb = c->heap[p];
189 if(b->used - now >= bb->used - now)
190 break;
191 c->heap[i] = bb;
192 bb->heap = i;
194 c->heap[i] = b;
195 b->heap = i;
197 return i;
200 static int
201 downheap(int i, VtBlock *b)
203 VtBlock *bb;
204 u32int now;
205 int k;
206 VtCache *c;
208 c = b->c;
209 now = c->now;
210 for(; ; i = k){
211 k = (i << 1) + 1;
212 if(k >= c->nheap)
213 break;
214 if(k + 1 < c->nheap && c->heap[k]->used - now > c->heap[k + 1]->used - now)
215 k++;
216 bb = c->heap[k];
217 if(b->used - now <= bb->used - now)
218 break;
219 c->heap[i] = bb;
220 bb->heap = i;
222 c->heap[i] = b;
223 b->heap = i;
224 return i;
227 /*
228 * Delete a block from the heap.
229 * Called with c->lk held.
230 */
231 static void
232 heapdel(VtBlock *b)
234 int i, si;
235 VtCache *c;
237 c = b->c;
239 si = b->heap;
240 if(si == BadHeap)
241 return;
242 b->heap = BadHeap;
243 c->nheap--;
244 if(si == c->nheap)
245 return;
246 b = c->heap[c->nheap];
247 i = upheap(si, b);
248 if(i == si)
249 downheap(i, b);
252 /*
253 * Insert a block into the heap.
254 * Called with c->lk held.
255 */
256 static void
257 heapins(VtBlock *b)
259 assert(b->heap == BadHeap);
260 upheap(b->c->nheap++, b);
263 /*
264 * locate the vtBlock with the oldest second to last use.
265 * remove it from the heap, and fix up the heap.
266 */
267 /* called with c->lk held */
268 static VtBlock*
269 vtcachebumpblock(VtCache *c)
271 VtBlock *b;
273 /*
274 * locate the vtBlock with the oldest second to last use.
275 * remove it from the heap, and fix up the heap.
276 */
277 if(c->nheap == 0){
278 vtcachedump(c);
279 fprint(2, "vtcachebumpblock: no free blocks in vtCache");
280 abort();
282 b = c->heap[0];
283 heapdel(b);
285 assert(b->heap == BadHeap);
286 assert(b->ref == 0);
288 /*
289 * unchain the vtBlock from hash chain if any
290 */
291 if(b->prev){
292 *(b->prev) = b->next;
293 if(b->next)
294 b->next->prev = b->prev;
295 b->prev = nil;
299 if(0)fprint(2, "droping %x:%V\n", b->addr, b->score);
300 /* set vtBlock to a reasonable state */
301 b->ref = 1;
302 b->iostate = BioEmpty;
303 return b;
306 /*
307 * fetch a local block from the memory cache.
308 * if it's not there, load it, bumping some other Block.
309 * if we're out of free blocks, we're screwed.
310 */
311 VtBlock*
312 vtcachelocal(VtCache *c, u32int addr, int type)
314 VtBlock *b;
316 if(addr >= c->nblock)
317 sysfatal("vtcachelocal: asked for block #%ud; only %d blocks\n",
318 addr, c->nblock);
320 b = &c->block[addr];
321 if(b->addr == NilBlock || b->iostate != BioLocal)
322 sysfatal("vtcachelocal: block is not local");
324 if(b->type != type)
325 sysfatal("vtcachelocal: block has wrong type %d != %d", b->type, type);
327 qlock(&c->lk);
328 b->ref++;
329 qunlock(&c->lk);
331 qlock(&b->lk);
332 b->nlock = 1;
333 return b;
336 VtBlock*
337 vtcacheallocblock(VtCache *c, int type)
339 VtBlock *b;
341 qlock(&c->lk);
342 b = vtcachebumpblock(c);
343 b->iostate = BioLocal;
344 b->type = type;
345 b->addr = b - c->block;
346 vtzeroextend(type, b->data, 0, c->blocksize);
347 vtlocaltoglobal(b->addr, b->score);
348 qunlock(&c->lk);
350 qlock(&b->lk);
351 b->nlock = 1;
353 return b;
356 /*
357 * fetch a global (Venti) block from the memory cache.
358 * if it's not there, load it, bumping some other block.
359 */
360 VtBlock*
361 vtcacheglobal(VtCache *c, uchar score[VtScoreSize], int type)
363 VtBlock *b;
364 ulong h;
365 int n;
366 u32int addr;
368 addr = vtglobaltolocal(score);
369 if(addr != NilBlock)
370 return vtcachelocal(c, addr, type);
372 h = (u32int)(score[0]|(score[1]<<8)|(score[2]<<16)|(score[3]<<24)) % c->nhash;
374 /*
375 * look for the block in the cache
376 */
377 qlock(&c->lk);
378 for(b = c->hash[h]; b != nil; b = b->next){
379 if(b->addr != NilBlock || memcmp(b->score, score, VtScoreSize) != 0 || b->type != type)
380 continue;
381 heapdel(b);
382 b->ref++;
383 qunlock(&c->lk);
384 qlock(&b->lk);
385 b->nlock = 1;
386 if(b->iostate == BioVentiError){
387 if(chattyventi)
388 fprint(2, "cached read error for %V\n", score);
389 werrstr("venti i/o error");
390 vtblockput(b);
391 return nil;
393 return b;
396 /*
397 * not found
398 */
399 b = vtcachebumpblock(c);
400 b->addr = NilBlock;
401 b->type = type;
402 memmove(b->score, score, VtScoreSize);
403 /* chain onto correct hash */
404 b->next = c->hash[h];
405 c->hash[h] = b;
406 if(b->next != nil)
407 b->next->prev = &b->next;
408 b->prev = &c->hash[h];
410 /*
411 * Lock b before unlocking c, so that others wait while we read.
413 * You might think there is a race between this qlock(b) before qunlock(c)
414 * and the qlock(c) while holding a qlock(b) in vtblockwrite. However,
415 * the block here can never be the block in a vtblockwrite, so we're safe.
416 * We're certainly living on the edge.
417 */
418 qlock(&b->lk);
419 b->nlock = 1;
420 qunlock(&c->lk);
422 n = vtread(c->z, score, type, b->data, c->blocksize);
423 if(n < 0){
424 werrstr("vtread %V: %r", score);
425 if(chattyventi)
426 fprint(2, "read %V: %r\n", score);
427 b->iostate = BioVentiError;
428 vtblockput(b);
429 return nil;
431 vtzeroextend(type, b->data, n, c->blocksize);
432 b->iostate = BioVenti;
433 b->nlock = 1;
434 b->decrypted = 0;
435 return b;
438 /*
439 * The thread that has locked b may refer to it by
440 * multiple names. Nlock counts the number of
441 * references the locking thread holds. It will call
442 * vtblockput once per reference.
443 */
444 void
445 vtblockduplock(VtBlock *b)
447 assert(b->nlock > 0);
448 b->nlock++;
451 /*
452 * we're done with the block.
453 * unlock it. can't use it after calling this.
454 */
455 void
456 vtblockput(VtBlock* b)
458 VtCache *c;
460 if(b == nil)
461 return;
463 if(0)fprint(2, "vtblockput: %d: %x %d %d\n", getpid(), b->addr, c->nheap, b->iostate);
465 if(--b->nlock > 0)
466 return;
468 /*
469 * b->nlock should probably stay at zero while
470 * the vtBlock is unlocked, but diskThread and vtSleep
471 * conspire to assume that they can just qlock(&b->lk); vtblockput(b),
472 * so we have to keep b->nlock set to 1 even
473 * when the vtBlock is unlocked.
474 */
475 assert(b->nlock == 0);
476 b->nlock = 1;
478 qunlock(&b->lk);
479 c = b->c;
480 qlock(&c->lk);
482 if(--b->ref > 0){
483 qunlock(&c->lk);
484 return;
487 assert(b->ref == 0);
488 switch(b->iostate){
489 case BioVenti:
490 //if(b->addr != NilBlock) print("blockput %d\n", b->addr);
491 b->used = c->now++;
492 case BioVentiError:
493 heapins(b);
494 break;
495 case BioLocal:
496 break;
498 qunlock(&c->lk);
501 int
502 vtblockwrite(VtBlock *b)
504 uchar score[VtScoreSize];
505 VtCache *c;
506 uint h;
507 int n;
509 if(b->iostate != BioLocal){
510 werrstr("vtblockwrite: not a local block");
511 return -1;
514 c = b->c;
515 n = vtzerotruncate(b->type, b->data, c->blocksize);
516 if(c->write(c->z, score, b->type, b->data, n) < 0)
517 return -1;
519 memmove(b->score, score, VtScoreSize);
521 qlock(&c->lk);
522 b->iostate = BioVenti;
523 h = (u32int)(score[0]|(score[1]<<8)|(score[2]<<16)|(score[3]<<24)) % c->nhash;
524 b->next = c->hash[h];
525 c->hash[h] = b;
526 if(b->next != nil)
527 b->next->prev = &b->next;
528 b->prev = &c->hash[h];
529 qunlock(&c->lk);
530 return 0;
533 uint
534 vtcacheblocksize(VtCache *c)
536 return c->blocksize;
539 VtBlock*
540 vtblockcopy(VtBlock *b)
542 VtBlock *bb;
544 ncopy++;
545 bb = vtcacheallocblock(b->c, b->type);
546 if(bb == nil){
547 vtblockput(b);
548 return nil;
550 memmove(bb->data, b->data, b->c->blocksize);
551 vtblockput(b);
552 return bb;
555 void
556 vtlocaltoglobal(u32int addr, uchar score[VtScoreSize])
558 memset(score, 0, 16);
559 score[16] = addr>>24;
560 score[17] = addr>>16;
561 score[18] = addr>>8;
562 score[19] = addr;
566 u32int
567 vtglobaltolocal(uchar score[VtScoreSize])
569 static uchar zero[16];
570 if(memcmp(score, zero, 16) != 0)
571 return NilBlock;
572 return (score[16]<<24)|(score[17]<<16)|(score[18]<<8)|score[19];
575 int
576 vtblockdirty(VtBlock *b)
578 USED(b);
579 return 0;