#include "stdinc.h"
#include "vac.h"
#include "dat.h"
#include "fns.h"

typedef struct Label Label;

enum {
    BadHeap = ~0,
};
/*
 * The plan is to store data in the cache in c->size blocks,
 * with each block zero extended to fill it out.  When writing to
 * venti, the block is zero truncated.  The walker also checks
 * that the block fits within psize or dsize, as the case may be.
 */
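/*
 * Rough sketch of typical use, assuming a connected VtSession z;
 * the 8K block size and 1000-block cache below are illustrative,
 * not required:
 *
 *    c = cacheAlloc(z, 8*1024, 1000);
 *    u = cacheGetLump(c, score, VtDataType, dsize);
 *    if(u != nil){
 *        ...read u->data...
 *        lumpDecRef(u, 1);    /* drops the reference and u->lk *​/
 *    }
 *    cacheFree(c);
 */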
struct Cache
{
    VtLock *lk;
    VtSession *z;
    u32int now;         /* ticks for usage timestamps */
    int size;           /* max. size of any block; allocated to each block */
    Lump **heads;       /* hash table for finding address */
    int nheap;          /* number of available victims */
    Lump **heap;        /* heap for locating victims */
    long nblocks;       /* number of blocks allocated */
    Lump *blocks;       /* array of block descriptors */
    u8int *mem;         /* memory for all block descriptors */
    Lump *free;         /* free list of lumps */

    long hashSize;
};
/*
 * the tag for a block is hash(index, parent tag)
 */
struct Label {
    uchar gen[4];
    uchar state;
    uchar type;     /* top bit indicates it is part of a directory */
    uchar tag[4];   /* tag of file it is in */
};
static char ENoDir[] = "directory entry is not allocated";

static void fixHeap(int si, Lump *b);
static int upHeap(int i, Lump *b);
static int downHeap(int i, Lump *b);
static char *lumpState(int);
static void lumpSetState(Lump *u, int state);
Cache *
cacheAlloc(VtSession *z, int blockSize, long nblocks)
{
    int i;
    Cache *c;
    Lump *b;

    c = vtMemAllocZ(sizeof(Cache));

    c->lk = vtLockAlloc();
    c->z = z;
    c->size = blockSize;
    c->nblocks = nblocks;
    c->hashSize = nblocks;
    c->heads = vtMemAllocZ(c->hashSize*sizeof(Lump*));
    c->heap = vtMemAllocZ(nblocks*sizeof(Lump*));
    c->blocks = vtMemAllocZ(nblocks*sizeof(Lump));
    c->mem = vtMemAllocZ(nblocks*blockSize);
    for(i = 0; i < nblocks; i++){
        b = &c->blocks[i];
        b->lk = vtLockAlloc();
        b->c = c;
        b->data = &c->mem[i * blockSize];
        b->addr = i+1;
        b->state = LumpFree;
        b->heap = BadHeap;
        b->next = c->free;
        c->free = b;
    }
    c->nheap = 0;

    return c;
}
long
cacheGetSize(Cache *c)
{
    return c->nblocks;
}
int
cacheGetBlockSize(Cache *c)
{
    return c->size;
}

int
cacheSetSize(Cache *c, long nblocks)
{
    USED(c);
    USED(nblocks);
    return 0;
}
void
cacheFree(Cache *c)
{
    int i;

    for(i = 0; i < c->nblocks; i++){
        assert(c->blocks[i].ref == 0);
        vtLockFree(c->blocks[i].lk);
    }
    vtMemFree(c->heads);
    vtMemFree(c->blocks);
    vtMemFree(c->mem);
    vtMemFree(c);
}
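/*
 * Hash on the low 4 bytes of the score plus the block type.  For
 * local blocks those bytes hold the cache address, so local blocks
 * hash by address.
 */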
static u32int
hash(Cache *c, uchar score[VtScoreSize], int type)
{
    u32int h;
    uchar *p = score + VtScoreSize-4;

    h = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    h += type;
    return h % c->hashSize;
}
static void
findLump(Cache *c, Lump *bb)
{
    Lump *b, *last;
    int h;

    last = nil;
    h = hash(c, bb->score, bb->type);
    for(b = c->heads[h]; b != nil; b = b->next){
        if(last != b->prev)
            vtFatal("bad prev link");
        if(b == bb)
            return;
        last = b;
    }
    vtFatal("block missing from hash table");
}
void
cacheCheck(Cache *c)
{
    u32int size, now;
    int i, k, refed, free;
    static uchar zero[VtScoreSize];
    Lump *p;

    size = c->size;
    now = c->now;

    free = 0;
    for(p=c->free; p; p=p->next)
        free++;
    for(i = 0; i < c->nheap; i++){
        if(c->heap[i]->heap != i)
            vtFatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
        if(i > 0 && c->heap[(i - 1) >> 1]->used2 - now > c->heap[i]->used2 - now)
            vtFatal("bad heap ordering");
        k = (i << 1) + 1;
        if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
            vtFatal("bad heap ordering");
        k++;
        if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
            vtFatal("bad heap ordering");
    }

    refed = 0;
    for(i = 0; i < c->nblocks; i++){
        if(c->blocks[i].data != &c->mem[i * size])
            vtFatal("mis-blocked at %d", i);
        if(c->blocks[i].ref && c->blocks[i].heap == BadHeap)
            refed++;
        if(memcmp(zero, c->blocks[i].score, VtScoreSize))
            findLump(c, &c->blocks[i]);
    }
    if(refed > 0)
        fprint(2, "cacheCheck: nheap %d refed %d free %d\n", c->nheap, refed, free);
    assert(c->nheap + refed + free == c->nblocks);
    refed = 0;
    for(i = 0; i < c->nblocks; i++){
        if(c->blocks[i].ref) {
            if(1)fprint(2, "%d %V %d %s\n", c->blocks[i].type, c->blocks[i].score, c->blocks[i].ref, lumpState(c->blocks[i].state));
            refed++;
        }
    }
    if(refed > 0)
        fprint(2, "cacheCheck: in use %d\n", refed);
}
/*
 * delete an arbitrary block from the heap
 */
static void
delHeap(Lump *db)
{
    fixHeap(db->heap, db->c->heap[--db->c->nheap]);
    db->heap = BadHeap;
}
static void
fixHeap(int si, Lump *b)
{
    int i;

    i = upHeap(si, b);
    if(i == si)
        downHeap(i, b);
}
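/*
 * Eviction candidates live in a binary min-heap keyed by used2, the
 * tick of a block's second most recent use.  Keys are compared as
 * (used2 - now) rather than as raw ticks so the ordering stays
 * correct when the u32int tick counter wraps; heap[0] is always the
 * block whose second-to-last use is oldest.
 */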
static int
upHeap(int i, Lump *b)
{
    Lump *bb;
    u32int now;
    int p;
    Cache *c;

    c = b->c;
    now = c->now;
    for(; i != 0; i = p){
        p = (i - 1) >> 1;
        bb = c->heap[p];
        if(b->used2 - now >= bb->used2 - now)
            break;
        c->heap[i] = bb;
        bb->heap = i;
    }
    c->heap[i] = b;
    b->heap = i;

    return i;
}
static int
downHeap(int i, Lump *b)
{
    Lump *bb;
    u32int now;
    int k;
    Cache *c;

    c = b->c;
    now = c->now;
    for(; ; i = k){
        k = (i << 1) + 1;
        if(k >= c->nheap)
            break;
        if(k + 1 < c->nheap && c->heap[k]->used2 - now > c->heap[k + 1]->used2 - now)
            k++;
        bb = c->heap[k];
        if(b->used2 - now <= bb->used2 - now)
            break;
        c->heap[i] = bb;
        bb->heap = i;
    }
    c->heap[i] = b;
    b->heap = i;
    return i;
}
/* called with c->lk held */
Lump *
cacheBumpLump(Cache *c)
{
    Lump *b;

    /*
     * missed: locate the block with the oldest second to last use.
     * remove it from the heap, and fix up the heap.
     */
    if(c->free) {
        b = c->free;
        c->free = b->next;
    } else {
        for(;;){
            if(c->nheap == 0) {
                cacheCheck(c);
                assert(0);
                return nil;
            }
            b = c->heap[0];
            delHeap(b);
            if(b->ref == 0)
                break;
        }

        /*
         * unchain the block from hash chain
         */
        if(b->prev == nil)
            c->heads[hash(c, b->score, b->type)] = b->next;
        else
            b->prev->next = b->next;
        if(b->next != nil)
            b->next->prev = b->prev;
    }

    /*
     * the new block has no last use, so assume it happens sometime in the middle
     */
    b->used = (b->used2 + c->now) / 2;
    b->asize = 0;

    return b;
}
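/*
 * Allocate a fresh, zero-filled local block of the given type.  The
 * caller gets it back locked, holding one reference, in LumpActive
 * state; its score encodes the block's cache address.
 */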
Lump *
cacheAllocLump(Cache *c, int type, int size, int dir)
{
    Lump *b;
    ulong h;

    assert(size <= c->size);

again:
    vtLock(c->lk);
    b = cacheBumpLump(c);
    if(b == nil) {
        vtUnlock(c->lk);
        fprint(2, "cache is full\n");
        /* XXX should be better */
        sleep(100);
        goto again;
    }

    vtLock(b->lk);

    assert(b->ref == 0);
    b->ref++;
    b->used2 = b->used;
    b->used = c->now++;

    /* convert addr into score */
    memset(b->score, 0, VtScoreSize-4);
    b->score[VtScoreSize-4] = b->addr>>24;
    b->score[VtScoreSize-3] = b->addr>>16;
    b->score[VtScoreSize-2] = b->addr>>8;
    b->score[VtScoreSize-1] = b->addr;

    b->dir = dir;
    b->type = type;
    b->gen = 0;
    b->asize = size;
    b->state = LumpFree;

    h = hash(c, b->score, b->type);

    /* chain onto correct hash */
    b->next = c->heads[h];
    c->heads[h] = b;
    if(b->next != nil)
        b->next->prev = b;
    b->prev = nil;

    vtUnlock(c->lk);

    vtZeroExtend(type, b->data, 0, size);
    lumpSetState(b, LumpActive);

    return b;
}
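/*
 * A score is "local" if all but its last 4 bytes are zero; such
 * scores name blocks that exist only in this cache, never on venti.
 */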
int
scoreIsLocal(uchar score[VtScoreSize])
{
    static uchar zero[VtScoreSize];

    return memcmp(score, zero, VtScoreSize-4) == 0;
}
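/*
 * Look up a block by (score, type).  On a cache miss the block is
 * read from venti, checked against its score with vtSha1Check, and
 * zero extended to size.  The lump is returned locked with an extra
 * reference; release both with lumpDecRef(b, 1).
 */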
Lump *
cacheGetLump(Cache *c, uchar score[VtScoreSize], int type, int size)
{
    Lump *b;
    ulong h;
    int n;
    static uchar zero[VtScoreSize];

    assert(size <= c->size);

    h = hash(c, score, type);

again:
    /*
     * look for the block in the cache
     */
    vtLock(c->lk);
    for(b = c->heads[h]; b != nil; b = b->next){
        if(memcmp(b->score, score, VtScoreSize) == 0 && b->type == type)
            goto found;
    }

    /* should not be looking for a temp block */
    if(scoreIsLocal(score)) {
        if(memcmp(score, zero, VtScoreSize) == 0)
            vtSetError("looking for zero score");
        else
            vtSetError("missing local block");
        vtUnlock(c->lk);
        return nil;
    }

    b = cacheBumpLump(c);
    if(b == nil) {
        vtUnlock(c->lk);
        sleep(100);
        goto again;
    }

    /* chain onto correct hash */
    b->next = c->heads[h];
    c->heads[h] = b;
    if(b->next != nil)
        b->next->prev = b;
    b->prev = nil;

    memmove(b->score, score, VtScoreSize);
    b->type = type;
    b->state = LumpFree;

found:
    b->ref++;
    b->used2 = b->used;
    b->used = c->now++;
    if(b->heap != BadHeap)
        fixHeap(b->heap, b);

    vtUnlock(c->lk);

    vtLock(b->lk);
    if(b->state != LumpFree)
        return b;

    n = vtRead(c->z, score, type, b->data, size);
    if(n < 0) {
        lumpDecRef(b, 1);
        return nil;
    }
    if(!vtSha1Check(score, b->data, n)) {
        vtSetError("vtSha1Check failed");
        lumpDecRef(b, 1);
        return nil;
    }
    vtZeroExtend(type, b->data, n, size);
    b->asize = size;
    lumpSetState(b, LumpVenti);

    return b;
}
static char *
lumpState(int state)
{
    switch(state) {
    default:
        return "Unknown!!";
    case LumpFree:
        return "Free";
    case LumpActive:
        return "Active";
    case LumpSnap:
        return "Snap";
    case LumpZombie:
        return "Zombie";
    case LumpVenti:
        return "Venti";
    }
}
static void
lumpSetState(Lump *u, int state)
{
//  if(u->state != LumpFree)
//      fprint(2, "%V: %s -> %s\n", u->score, lumpState(u->state), lumpState(state));
    u->state = state;
}
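/*
 * Copy the score of child `offset' out of a pointer, directory, or
 * root block into score.  Returns 1 if the resulting score names a
 * venti block, 0 if it is local or an error occurred.
 */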
int
lumpGetScore(Lump *u, int offset, uchar score[VtScoreSize])
{
    uchar *sp;
    VtRoot root;
    VtEntry dir;

    vtLock(u->lk);

    switch(u->type) {
    default:
        vtSetError("bad type");
        goto Err;
    case VtPointerType0:
    case VtPointerType1:
    case VtPointerType2:
    case VtPointerType3:
    case VtPointerType4:
    case VtPointerType5:
    case VtPointerType6:
        if((offset+1)*VtScoreSize > u->asize)
            sp = nil;
        else
            sp = u->data + offset*VtScoreSize;
        break;
    case VtRootType:
        if(u->asize < VtRootSize) {
            vtSetError("runt root block");
            goto Err;
        }
        if(!vtRootUnpack(&root, u->data))
            goto Err;
        sp = root.score;
        break;
    case VtDirType:
        if((offset+1)*VtEntrySize > u->asize) {
            vtSetError(ENoDir);
            goto Err;
        }
        if(!vtEntryUnpack(&dir, u->data, offset))
            goto Err;
        if(!(dir.flags & VtEntryActive)) {
            vtSetError(ENoDir);
            goto Err;
        }
        sp = dir.score;
        break;
    }

    if(sp == nil)
        memmove(score, vtZeroScore, VtScoreSize);
    else
        memmove(score, sp, VtScoreSize);

    vtUnlock(u->lk);
    return !scoreIsLocal(score);
Err:
    vtUnlock(u->lk);
    return 0;
}
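/*
 * Walk from u to the child block named at `offset', returning it with
 * the requested type and size.  Unless readOnly is set, a child that
 * came from venti or from an older generation is split: its contents
 * are copied into a fresh local block and the parent's pointer is
 * redirected to the copy, giving copy-on-write behaviour.
 */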
Lump *
lumpWalk(Lump *u, int offset, int type, int size, int readOnly, int lock)
{
    Lump *v, *vv;
    Cache *c;
    uchar score[VtScoreSize], *sp;
    VtRoot root;
    VtEntry dir;
    int split, isdir;

    c = u->c;
    vtLock(u->lk);

Again:
    v = nil;
    vv = nil;

    isdir = u->dir;
    switch(u->type) {
    default:
        vtSetError("bad type");
        goto Err;
    case VtPointerType0:
    case VtPointerType1:
    case VtPointerType2:
    case VtPointerType3:
    case VtPointerType4:
    case VtPointerType5:
    case VtPointerType6:
        if((offset+1)*VtScoreSize > u->asize)
            sp = nil;
        else
            sp = u->data + offset*VtScoreSize;
        break;
    case VtRootType:
        if(u->asize < VtRootSize) {
            vtSetError("runt root block");
            goto Err;
        }
        if(!vtRootUnpack(&root, u->data))
            goto Err;
        sp = root.score;
        break;
    case VtDirType:
        if((offset+1)*VtEntrySize > u->asize) {
            vtSetError(ENoDir);
            goto Err;
        }
        if(!vtEntryUnpack(&dir, u->data, offset))
            goto Err;
        if(!(dir.flags & VtEntryActive)) {
            vtSetError(ENoDir);
            goto Err;
        }
        isdir = (dir.flags & VtEntryDir) != 0;
//      sp = dir.score;
        sp = u->data + offset*VtEntrySize + 20;  /* point sp at the score field of the packed entry */
        break;
    }

    if(sp == nil)
        memmove(score, vtZeroScore, VtScoreSize);
    else
        memmove(score, sp, VtScoreSize);

    vtUnlock(u->lk);

    if(0)fprint(2, "lumpWalk: %V:%s %d:%d-> %V:%d\n", u->score, lumpState(u->state), u->type, offset, score, type);
    v = cacheGetLump(c, score, type, size);
    if(v == nil)
        return nil;

    split = 1;
    if(readOnly)
        split = 0;

    switch(v->state) {
    default:
        assert(0);
    case LumpFree:
        fprint(2, "block is free %V!\n", v->score);
        vtSetError("phase error");
        goto Err2;
    case LumpActive:
        if(v->gen < u->gen) {
            print("LumpActive gen\n");
            lumpSetState(v, LumpSnap);
            v->gen = u->gen;
        } else
            split = 0;
        break;
    case LumpSnap:
    case LumpVenti:
        break;
    }

    /* easy case */
    if(!split) {
        if(!lock)
            vtUnlock(v->lk);
        return v;
    }

    if(sp == nil) {
        vtSetError("bad offset");
        goto Err2;
    }

    vv = cacheAllocLump(c, v->type, size, isdir);
    /* vv is locked */
    vv->gen = u->gen;
    memmove(vv->data, v->data, v->asize);
    if(0)fprint(2, "split %V into %V\n", v->score, vv->score);

    lumpDecRef(v, 1);
    v = nil;

    vtLock(u->lk);
    if(u->state != LumpActive) {
        vtSetError("bad parent state: can not happen");
        goto Err;
    }

    /* check that nothing changed underfoot */
    if(memcmp(sp, score, VtScoreSize) != 0) {
        lumpDecRef(vv, 1);
        fprint(2, "lumpWalk: parent changed under foot\n");
        goto Again;
    }

    /* XXX - hold Active blocks up - will go eventually */
    lumpIncRef(vv);

    /* change the parent */
    memmove(sp, vv->score, VtScoreSize);

    vtUnlock(u->lk);

    if(!lock)
        vtUnlock(vv->lk);
    return vv;
Err:
    vtUnlock(u->lk);
    lumpDecRef(v, 0);
    lumpDecRef(vv, 1);
    return nil;
Err2:
    lumpDecRef(v, 1);
    return nil;
}
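/*
 * Clear entry `entry' of block u: the pointer (or directory entry) is
 * zeroed in place, and if the old score was local the child block it
 * named is released.
 */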
void
lumpFreeEntry(Lump *u, int entry)
{
    uchar score[VtScoreSize];
    int type;
    ulong gen;
    VtEntry dir;
    Cache *c;

    c = u->c;
    vtLock(u->lk);
    if(u->state == LumpVenti)
        goto Exit;

    switch(u->type) {
    default:
        fprint(2, "freeing bad lump type: %d\n", u->type);
        goto Exit;
    case VtPointerType0:
        if((entry+1)*VtScoreSize > u->asize)
            goto Exit;
        memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
        memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
        type = u->dir?VtDirType:VtDataType;
        break;
    case VtPointerType1:
    case VtPointerType2:
    case VtPointerType3:
    case VtPointerType4:
    case VtPointerType5:
    case VtPointerType6:
        if((entry+1)*VtScoreSize > u->asize)
            goto Exit;
        memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
        memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
        type = u->type-1;
        break;
    case VtDirType:
        if((entry+1)*VtEntrySize > u->asize)
            goto Exit;
        if(!vtEntryUnpack(&dir, u->data, entry))
            goto Exit;
        if(!(dir.flags & VtEntryActive))
            goto Exit;
        gen = dir.gen;
        if(gen != ~0)
            gen++;
        if(dir.depth == 0)
            type = (dir.flags&VtEntryDir)?VtDirType:VtDataType;
        else
            type = VtPointerType0 + dir.depth - 1;
        memmove(score, dir.score, VtScoreSize);
        memset(&dir, 0, sizeof(dir));
        dir.gen = gen;
        vtEntryPack(&dir, u->data, entry);
        break;
    case VtDataType:
        type = VtErrType;
        break;
    }
    vtUnlock(u->lk);

    if(type == VtErrType || !scoreIsLocal(score))
        return;

    u = cacheGetLump(c, score, type, c->size);
    if(u == nil)
        return;
    lumpDecRef(u, 1);
    /* XXX remove extra reference */
    lumpDecRef(u, 0);
    return;
Exit:
    vtUnlock(u->lk);
    return;
}
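/*
 * Free every entry a block points to; used when an Active block is
 * dropped so that any local children can be reclaimed as well.
 */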
void
lumpCleanup(Lump *u)
{
    int i, n;

    switch(u->type) {
    default:
        return;
    case VtPointerType0:
    case VtPointerType1:
    case VtPointerType2:
    case VtPointerType3:
    case VtPointerType4:
    case VtPointerType5:
    case VtPointerType6:
        n = u->asize/VtScoreSize;
        break;
    case VtDirType:
        n = u->asize/VtEntrySize;
        break;
    }

    for(i=0; i<n; i++)
        lumpFreeEntry(u, i);
}
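/*
 * Drop one reference to b, first unlocking it if unlock is set.  When
 * the count reaches zero the lump is cleaned up according to its state
 * and reinserted into the heap of eviction candidates.
 */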
void
lumpDecRef(Lump *b, int unlock)
{
    int i;
    Cache *c;

    if(b == nil)
        return;

    if(unlock)
        vtUnlock(b->lk);

    c = b->c;
    vtLock(c->lk);
    if(--b->ref > 0) {
        vtUnlock(c->lk);
        return;
    }
    assert(b->ref == 0);

    switch(b->state) {
    default:
        fprint(2, "bad state: %s\n", lumpState(b->state));
        assert(0);
    case LumpActive:
        /* hack - but will do for now */
        b->ref++;
        vtUnlock(c->lk);
        lumpCleanup(b);
        vtLock(c->lk);
        b->ref--;
        lumpSetState(b, LumpFree);
        break;
    case LumpZombie:
        lumpSetState(b, LumpFree);
        break;
    case LumpFree:
    case LumpVenti:
        break;
    }

    /*
     * reinsert in the free heap
     */
    if(b->heap == BadHeap) {
        i = upHeap(c->nheap++, b);
        c->heap[i] = b;
        b->heap = i;
    }

    vtUnlock(c->lk);
}
Lump *
lumpIncRef(Lump *b)
{
    Cache *c;

    c = b->c;

    vtLock(c->lk);
    assert(b->ref > 0);
    b->ref++;
    vtUnlock(c->lk);
    return b;
}