2 * Manage tree of VtFiles stored in the block cache.
4 * The single point of truth for the info about the VtFiles themselves
5 * is the block data. Because of this, there is no explicit locking of
6 * VtFile structures, and indeed there may be more than one VtFile
7 * structure for a given Venti file. They synchronize through the
10 * This is a bit simpler than fossil because there are no epochs
11 * or tags or anything else. Just mutable local blocks and immutable
/* VtFile fields (rest of the struct declaration elided in this view). */
29 VtBlock *b; /* block containing this file */
30 uchar score[VtScoreSize]; /* score of block containing this file */
39 int epb; /* entries per block in parent */
40 u32int offset; /* entry offset in parent */
/* Error message strings used by the routines below. */
43 static char EBadEntry[] = "bad VtEntry";
44 static char ENotDir[] = "walk in non-directory";
45 static char ETooBig[] = "file too big";
46 static char EBadAddr[] = "bad address";
47 static char ELabelMismatch[] = "label mismatch";
/* Forward declarations for static helpers defined later in the file. */
49 static int sizetodepth(uvlong s, int psize, int dsize);
50 static VtBlock *fileload(VtFile *r, VtEntry *e);
51 static int shrinkdepth(VtFile*, VtBlock*, VtEntry*, int);
52 static int shrinksize(VtFile*, VtEntry*, uvlong);
53 static int growdepth(VtFile*, VtBlock*, VtEntry*, int);
/* A VtFile counts as locked while it holds its containing block r->b. */
55 #define ISLOCKED(r) ((r)->b != nil)
/* Extract the tree depth encoded in the low bits of a block type. */
56 #define DEPTH(t) ((t)&VtTypeDepthMask)
/*
 * Build a VtFile for the directory entry at index `offset` in block b,
 * whose parent file is p.  The entry is unpacked and sanity-checked
 * before the VtFile structure is allocated.
 * NOTE(review): many lines are elided in this view; comments cover
 * only the visible code.
 */
59 vtfilealloc(VtCache *c, VtBlock *b, VtFile *p, u32int offset, int mode)
66 assert(p==nil || ISLOCKED(p));
/* entries per directory block in the parent */
72 epb = p->dsize / VtEntrySize;
74 if(b->type != VtDirType)
78 * a non-active entry is the only thing that
79 * can legitimately happen here. all the others
82 if(vtentryunpack(&e, b->data, offset % epb) < 0){
83 fprint(2, "vtentryunpack failed\n");
86 if(!(e.flags & VtEntryActive)){
87 if(0)fprint(2, "not active\n");
/* reject implausibly small block sizes */
90 if(e.psize < 256 || e.dsize < 256){
91 fprint(2, "psize %ud dsize %ud\n", e.psize, e.dsize);
/* the recorded depth must be enough to address e.size bytes */
95 if(DEPTH(e.type) < sizetodepth(e.size, e.psize, e.dsize)){
96 fprint(2, "depth %ud size %llud psize %ud dsize %ud\n",
97 DEPTH(e.type), e.size, e.psize, e.dsize);
/* entry block sizes may not exceed the cache's block size */
101 size = vtcacheblocksize(c);
102 if(e.dsize > size || e.psize > size){
103 fprint(2, "psize %ud dsize %ud blocksize %ud\n", e.psize, e.dsize, size);
107 r = vtmallocz(sizeof(VtFile));
112 r->dir = (e.flags & VtEntryDir) != 0;
117 assert(mode == VtOREAD || p->mode == VtORDWR);
/* remember the score of the block holding our entry */
121 memmove(r->score, b->score, VtScoreSize);
/*
 * Open the root VtFile stored in local cache block `addr`.
 * (Lines elided in this view.)
 */
133 vtfileroot(VtCache *c, u32int addr, int mode)
/* the root must live in a directory-typed block */
138 b = vtcachelocal(c, addr, VtDirType);
/* no parent, entry index 0 */
142 r = vtfilealloc(c, b, nil, 0, mode);
/*
 * Open a root VtFile described by entry e, packing e into a freshly
 * allocated directory block.  (Lines elided in this view.)
 */
148 vtfileopenroot(VtCache *c, VtEntry *e)
153 b = vtcacheallocblock(c, VtDirType);
157 vtentrypack(e, b->data, 0);
158 f = vtfilealloc(c, b, nil, 0, VtORDWR);
/*
 * Create a new empty root file: build an active entry of the given
 * type with the zero score and open it.  (Lines elided in this view.)
 */
164 vtfilecreateroot(VtCache *c, int psize, int dsize, int type)
168 memset(&e, 0, sizeof e);
169 e.flags = VtEntryActive;
172 if(type == VtDirType)
173 e.flags |= VtEntryDir;
/* zero score == empty content */
174 memmove(e.score, vtzeroscore, VtScoreSize);
176 return vtfileopenroot(c, &e);
/*
 * Open the file at entry `offset` inside directory file r.
 * (Lines elided in this view.)
 */
180 vtfileopen(VtFile *r, u32int offset, int mode)
/* block number holding the wanted entry */
191 bn = offset/(r->dsize/VtEntrySize);
193 b = vtfileblock(r, bn, mode);
196 r = vtfilealloc(r->c, b, r, offset, mode);
/*
 * Create a new file in directory r by finding an inactive entry,
 * initializing it as active with the zero score, and returning an
 * open VtFile for it.  NOTE(review): many lines are elided in this view.
 */
202 vtfilecreate(VtFile *r, int psize, int dsize, int dir)
219 epb = r->dsize/VtEntrySize;
221 size = vtfilegetdirsize(r);
223 * look at a random block to see if we can find an empty entry
225 offset = lnrand(size+1);
/* round down to the first entry of the chosen block */
226 offset -= offset % epb;
228 /* try the given block and then try the last block */
231 b = vtfileblock(r, bn, VtORDWR);
/* scan for an inactive entry whose gen is not the retired value ~0 */
234 for(i=offset%r->epb; i<epb; i++){
235 if(vtentryunpack(&e, b->data, i) < 0)
237 if((e.flags&VtEntryActive) == 0 && e.gen != ~0)
242 fprint(2, "vtfilecreate: cannot happen\n");
243 werrstr("vtfilecreate: cannot happen");
250 /* found an entry - gen already set */
253 e.flags = VtEntryActive;
254 e.type = dir ? VtDirType : VtDataType;
256 memmove(e.score, vtzeroscore, VtScoreSize);
257 vtentrypack(&e, b->data, i);
/* grow the directory's entry count if we used one past the old end */
261 if(vtfilesetdirsize(r, offset+1) < 0){
267 rr = vtfilealloc(r->c, b, r, offset, VtORDWR);
/*
 * Truncate r's content; with doremove set, deactivate the entry too.
 * (Lines elided in this view.)
 */
273 vtfilekill(VtFile *r, int doremove)
283 if(doremove==0 && e.size == 0){
284 /* already truncated */
296 e.flags &= ~VtEntryLocal;
/* reset content to the zero score and repack the entry in place */
299 memmove(e.score, vtzeroscore, VtScoreSize);
300 vtentrypack(&e, b->data, r->offset % r->epb);
/* Remove the file: vtfilekill with doremove set. */
312 vtfileremove(VtFile *r)
314 return vtfilekill(r, 1);
/* Truncate the file: vtfilekill without removing the entry. */
318 vtfiletruncate(VtFile *r)
320 return vtfilekill(r, 0);
324 vtfilegetsize(VtFile *r)
/*
 * Zero out pointers beyond `size` bytes so a shrunken file no longer
 * references unneeded blocks: at each level, zero whole-subtree
 * pointers past the cutoff and recurse into the one partially-needed
 * block.  NOTE(review): many lines are elided in this view.
 */
339 shrinksize(VtFile *r, VtEntry *e, uvlong size)
341 int i, depth, type, isdir, ppb;
343 uchar score[VtScoreSize];
346 b = vtcacheglobal(r->c, e->score, e->type);
/* pointers per pointer block */
351 ppb = e->psize/VtScoreSize;
354 for(i=0; i+1<depth; i++)
359 if(b->addr == NilBlock){
360 /* not worth copying the block just so we can zero some of it */
366 * invariant: each pointer in the tree rooted at b accounts for ptrsz bytes
369 /* zero the pointers to unnecessary blocks */
/* first pointer index still (partially) needed */
370 i = (size+ptrsz-1)/ptrsz;
372 memmove(b->data+i*VtScoreSize, vtzeroscore, VtScoreSize);
374 /* recurse (go around again) on the partially necessary block */
383 memmove(score, b->data+i*VtScoreSize, VtScoreSize);
385 b = vtcacheglobal(r->c, score, type);
390 if(b->addr == NilBlock){
396 * No one ever truncates BtDir blocks.
/* at the leaf, zero the data tail past `size` */
398 if(depth==0 && !isdir && e->dsize > size)
399 memset(b->data+size, 0, e->dsize-size);
/*
 * Set r's size to `size`, adjusting the pointer-tree depth as needed
 * and zeroing truncated content.  NOTE(review): lines elided in this view.
 */
405 vtfilesetsize(VtFile *r, uvlong size)
413 return vtfiletruncate(r);
/* enforce both the protocol limit and the addressable-block limit */
415 if(size > VtMaxFileSize || size > ((uvlong)MaxBlock)*r->dsize){
430 depth = sizetodepth(size, e.psize, e.dsize);
431 edepth = DEPTH(e.type);
433 if(shrinkdepth(r, b, &e, depth) < 0){
437 }else if(depth > edepth){
438 if(growdepth(r, b, &e, depth) < 0){
445 shrinksize(r, &e, size);
/* write the updated entry back into its containing block */
448 vtentrypack(&e, b->data, r->offset % r->epb);
/*
 * Set directory r's size measured in entries (ds), converting the
 * entry count to bytes: whole blocks plus the partial last block.
 */
455 vtfilesetdirsize(VtFile *r, u32int ds)
461 epb = r->dsize/VtEntrySize;
463 size = (uvlong)r->dsize*(ds/epb);
464 size += VtEntrySize*(ds%epb);
465 return vtfilesetsize(r, size);
/*
 * Return directory r's size in entries, derived from the byte size:
 * whole blocks plus entries in the partial last block.
 */
469 vtfilegetdirsize(VtFile *r)
476 epb = r->dsize/VtEntrySize;
478 size = vtfilegetsize(r);
479 ds = epb*(size/r->dsize);
480 ds += (size%r->dsize)/VtEntrySize;
485 vtfilegetentry(VtFile *r, VtEntry *e)
/*
 * Replace r's on-disk entry with *e: load the containing block and
 * repack the entry in place.  (Lines elided in this view.)
 */
499 vtfilesetentry(VtFile *r, VtEntry *e)
505 b = fileload(r, &ee);
508 vtentrypack(e, b->data, r->offset % r->epb);
/*
 * Follow pointer `index` in block p one level down the tree and
 * return the child block.  For writes, make the child a mutable
 * local block (allocate or copy-on-write) and record the new score
 * in both e and p.  NOTE(review): many lines are elided in this view.
 */
514 blockwalk(VtBlock *p, int index, VtCache *c, int mode, VtEntry *e)
530 score = p->data+index*VtScoreSize;
533 //print("walk from %V/%d ty %d to %V ty %d\n", p->score, index, p->type, score, type);
/* writing through a nil pointer: allocate a fresh local block */
535 if(mode == VtOWRITE && vtglobaltolocal(score) == NilBlock){
536 b = vtcacheallocblock(c, type);
540 b = vtcacheglobal(c, score, type);
542 if(b == nil || mode == VtOREAD)
/* already local (mutable): no copy needed */
545 if(vtglobaltolocal(b->score) != NilBlock)
553 e->flags |= VtEntryLocal;
/* copy-on-write of a shared block */
555 b = vtblockcopy(b/*, e->tag, fs->ehi, fs->elo*/);
/* record the (possibly new) child score in the parent */
560 if(p->type == VtDirType){
561 memmove(e->score, b->score, VtScoreSize);
562 vtentrypack(e, p->data, index);
564 memmove(p->data+index*VtScoreSize, b->score, VtScoreSize);
570 * Change the depth of the VtFile r.
571 * The entry e for r is contained in block p.
/*
 * growdepth: deepen the tree to `depth` by stacking new pointer
 * blocks above the current root.  (Lines elided in this view.)
 */
574 growdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
580 assert(depth <= VtPointerDepth);
582 b = vtcacheglobal(r->c, e->score, e->type);
589 * Keep adding layers until we get to the right depth
590 * or an error occurs.
592 while(DEPTH(e->type) < depth){
/* new root one level deeper, pointing at the old root */
593 bb = vtcacheallocblock(r->c, e->type+1);
596 memmove(bb->data, b->score, VtScoreSize);
597 memmove(e->score, bb->score, VtScoreSize);
599 e->flags |= VtEntryLocal;
/* write the updated entry back into its containing block */
604 vtentrypack(e, p->data, r->offset % r->epb);
/* success only if the requested depth was reached */
607 if(DEPTH(e->type) == depth)
/*
 * Reduce r's tree depth to `depth` by walking down from the old root
 * and re-rooting the entry at a lower pointer block.
 * NOTE(review): many lines are elided in this view.
 */
613 shrinkdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
615 VtBlock *b, *nb, *ob, *rb;
619 assert(depth <= VtPointerDepth);
621 rb = vtcacheglobal(r->c, e->score, e->type);
626 * Walk down to the new root block.
627 * We may stop early, but something is better than nothing.
633 for(; DEPTH(e->type) > depth; e->type--){
/* the first pointer of each level leads toward the new root */
634 nb = vtcacheglobal(r->c, b->data, e->type-1);
637 if(ob!=nil && ob!=rb)
649 * Right now, e points at the root block rb, b is the new root block,
650 * and ob points at b. To update:
652 * (i) change e to point at b
653 * (ii) zero the pointer ob -> b
654 * (iii) free the root block
656 * p (the block containing e) must be written before
661 memmove(e->score, b->score, VtScoreSize);
662 vtentrypack(e, p->data, r->offset % r->epb);
665 memmove(ob->data, vtzeroscore, VtScoreSize);
669 if(ob!=nil && ob!=rb)
/* success only if the requested depth was reached */
673 if(DEPTH(e->type) == depth)
/*
 * Decompose block number bn into per-level pointer indices for e's
 * tree; returns the number of levels needed.  (Lines elided in this view.)
 */
679 mkindices(VtEntry *e, u32int bn, int *index)
683 memset(index, 0, VtPointerDepth*sizeof(int));
/* pointers per pointer block */
685 np = e->psize/VtScoreSize;
686 for(i=0; bn > 0; i++){
/* bn too large to address at the maximum pointer depth */
687 if(i >= VtPointerDepth){
/*
 * Return block bn of file r, walking the pointer tree from the entry
 * down via blockwalk.  Grows the tree depth first when bn does not
 * fit in the current tree.  NOTE(review): lines elided in this view.
 */
698 vtfileblock(VtFile *r, u32int bn, int mode)
701 int index[VtPointerDepth+1];
707 assert(bn != NilBlock);
713 i = mkindices(&e, bn, index);
716 if(i > DEPTH(e.type)){
722 if(growdepth(r, b, &e, i) < 0)
726 assert(b->type == VtDirType);
/* topmost index selects our entry within the parent block */
728 index[DEPTH(e.type)] = r->offset % r->epb;
730 /* mode for intermediate block */
735 for(i=DEPTH(e.type); i>=0; i--){
736 bb = blockwalk(b, index[i], r->c, i==0 ? mode : m, &e);
/*
 * Fetch the score of block bn of file r without loading the data
 * block itself: walk the pointer blocks read-only and copy the leaf
 * score out.  Blocks past the tree depth report the zero score.
 * NOTE(review): lines elided in this view.
 */
749 vtfileblockhash(VtFile *r, u32int bn, uchar score[VtScoreSize])
752 int index[VtPointerDepth+1];
757 assert(bn != NilBlock);
763 i = mkindices(&e, bn, index);
768 if(i > DEPTH(e.type)){
769 memmove(score, vtzeroscore, VtScoreSize);
774 index[DEPTH(e.type)] = r->offset % r->epb;
/* stop at depth 1: only the pointer to the leaf is needed */
776 for(i=DEPTH(e.type); i>=1; i--){
777 bb = blockwalk(b, index[i], r->c, VtOREAD, &e);
782 if(memcmp(b->score, vtzeroscore, VtScoreSize) == 0)
786 memmove(score, b->data+index[0]*VtScoreSize, VtScoreSize);
791 fprint(2, "vtfileblockhash: %r\n");
797 vtfileincref(VtFile *r)
/*
 * Drop a reference to r; on the final release also close the parent
 * and poison the freed structure.  (Lines elided in this view.)
 */
805 vtfileclose(VtFile *r)
818 vtfileclose(r->parent);
/* poison freed memory to catch use-after-free */
819 memset(r, ~0, sizeof(*r));
824 * Retrieve the block containing the entry for r.
825 * If a snapshot has happened, we might need
826 * to get a new copy of the block. We avoid this
827 * in the common case by caching the score for
828 * the block and the last epoch in which it was valid.
830 * We use r->mode to tell the difference between active
831 * file system VtFiles (VtORDWR) and VtFiles for the
832 * snapshot file system (VtOREAD).
/* NOTE(review): many lines are elided in this view. */
835 fileloadblock(VtFile *r, int mode)
845 assert(r->mode == VtORDWR);
/* fast path: the cached score still resolves */
847 b = vtcacheglobal(r->c, r->score, VtDirType);
852 assert(r->parent != nil);
/* refetch via the parent and recache the score */
853 if(vtfilelock(r->parent, VtORDWR) < 0)
855 b = vtfileblock(r->parent, r->offset/r->epb, VtORDWR);
856 vtfileunlock(r->parent);
859 memmove(r->score, b->score, VtScoreSize);
865 werrstr("read/write lock of read-only file");
868 addr = vtglobaltolocal(r->score);
870 return vtcacheglobal(r->c, r->score, VtDirType);
872 b = vtcachelocal(r->c, addr, VtDirType);
877 * If it failed because the epochs don't match, the block has been
878 * archived and reclaimed. Rewalk from the parent and get the
879 * new pointer. This can't happen in the VtORDWR case
880 * above because blocks in the current epoch don't get
881 * reclaimed. The fact that we're VtOREAD means we're
882 * a snapshot. (Or else the file system is read-only, but then
883 * the archiver isn't going around deleting blocks.)
885 rerrstr(e, sizeof e);
886 if(strcmp(e, ELabelMismatch) == 0){
887 if(vtfilelock(r->parent, VtOREAD) < 0)
889 b = vtfileblock(r->parent, r->offset/r->epb, VtOREAD);
890 vtfileunlock(r->parent);
892 fprint(2, "vtfilealloc: lost %V found %V\n",
894 memmove(r->score, b->score, VtScoreSize);
/*
 * Lock r by loading (and then holding) the block containing its entry.
 * (Lines elided in this view.)
 */
903 vtfilelock(VtFile *r, int mode)
910 b = fileloadblock(r, mode);
914 * The fact that we are holding b serves as the
915 * lock entitling us to write to r->b.
923 * Lock two (usually sibling) VtFiles. This needs special care
924 * because the Entries for both vtFiles might be in the same block.
925 * We also try to lock blocks in left-to-right order within the tree.
/* NOTE(review): many lines are elided in this view. */
928 vtfilelock2(VtFile *r, VtFile *rr, int mode)
933 return vtfilelock(r, mode);
/* same entry block: load once and share */
938 if(r->parent==rr->parent && r->offset/r->epb == rr->offset/rr->epb){
939 b = fileloadblock(r, mode);
/* otherwise load in left-to-right (lower offset first) order */
944 }else if(r->parent==rr->parent || r->offset > rr->offset){
945 bb = fileloadblock(rr, mode);
946 b = fileloadblock(r, mode);
948 b = fileloadblock(r, mode);
949 bb = fileloadblock(rr, mode);
951 if(b == nil || bb == nil){
960 * The fact that we are holding b and bb serves
961 * as the lock entitling us to write to r->b and rr->b.
/* Release r's lock (the held entry block).  (Lines elided in this view.) */
969 vtfileunlock(VtFile *r)
974 fprint(2, "vtfileunlock: already unlocked\n");
/*
 * Return r's (already locked) entry block and unpack r's entry into *e.
 * (Lines elided in this view.)
 */
983 fileload(VtFile *r, VtEntry *e)
989 if(vtentryunpack(e, b->data, r->offset % r->epb) < 0)
/*
 * Compute the pointer-tree depth needed to hold s bytes, given the
 * pointer-block size psize and data-block size dsize.
 */
996 sizetodepth(uvlong s, int psize, int dsize)
1001 /* determine pointer depth */
1002 np = psize/VtScoreSize;
/* number of data blocks needed (rounded up) */
1003 s = (s + dsize - 1)/dsize;
/* add pointer levels until a single block suffices */
1004 for(d = 0; s > 1; d++)
1005 s = (s + np - 1)/np;
/*
 * Read up to `count` bytes at `offset` from f into data.  The read is
 * clipped to the file size and to the containing data block, so a
 * short read is possible.  NOTE(review): lines elided in this view.
 */
1010 vtfileread(VtFile *f, void *data, long count, vlong offset)
1016 assert(ISLOCKED(f));
1018 vtfilegetentry(f, &e);
1021 if(count < 0 || offset < 0){
1022 werrstr("vtfileread: bad offset or count");
/* at or past EOF */
1025 if(offset >= e.size)
1028 if(offset+count > e.size)
1029 count = e.size - offset;
/* clip to the single data block containing offset */
1031 frag = offset % e.dsize;
1032 if(frag+count > e.dsize)
1033 count = e.dsize - frag;
1035 b = vtfileblock(f, offset/e.dsize, VtOREAD);
1039 memmove(data, b->data+frag, count);
/*
 * Write up to one data block's worth of bytes at `offset`, clipping
 * the write to the containing block and extending the recorded file
 * size if it grows.  NOTE(review): lines elided in this view.
 */
1045 filewrite1(VtFile *f, void *data, long count, vlong offset)
1051 vtfilegetentry(f, &e);
1052 if(count < 0 || offset < 0){
1053 werrstr("vtfilewrite: bad offset or count");
1057 frag = offset % e.dsize;
1058 if(frag+count > e.dsize)
1059 count = e.dsize - frag;
/* whole-block overwrite need not read the old contents */
1062 if(frag == 0 && count == e.dsize)
1065 b = vtfileblock(f, offset/e.dsize, m);
1069 memmove(b->data+frag, data, count);
/* extend the recorded size if we wrote past the old end */
1071 if(offset+count > e.size){
1072 vtfilegetentry(f, &e);
1073 e.size = offset+count;
1074 vtfilesetentry(f, &e);
/*
 * Write `count` bytes at `offset`, looping over filewrite1 one data
 * block at a time.  (Lines elided in this view.)
 */
1082 vtfilewrite(VtFile *f, void *data, long count, vlong offset)
1086 assert(ISLOCKED(f));
1091 m = filewrite1(f, (char*)data+tot, count-tot, offset+tot);
/*
 * Recursively write the local (unflushed) tree rooted at `score`,
 * substituting each block's permanent score into its parent as the
 * children are written.  NOTE(review): many lines are elided in this view.
 */
1102 flushblock(VtCache *c, VtBlock *bb, uchar score[VtScoreSize], int ppb, int epb,
/* not a local block: already flushed, nothing to do */
1110 addr = vtglobaltolocal(score);
1111 if(addr == NilBlock)
1116 if(memcmp(b->score, score, VtScoreSize) != 0)
1119 if((b = vtcachelocal(c, addr, type)) == nil)
/* directory block: flush each entry's subtree */
1127 for(i=0; i<epb; i++){
1128 if(vtentryunpack(&e, b->data, i) < 0)
1130 if(flushblock(c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
1136 default: /* VtPointerTypeX */
1137 for(i=0; i<ppb; i++){
1138 if(flushblock(c, nil, b->data+VtScoreSize*i, ppb, epb, type-1) < 0)
/* write this block and substitute its permanent score */
1144 if(vtblockwrite(b) < 0)
1146 memmove(score, b->score, VtScoreSize);
/*
 * Flush f's whole tree via flushblock and repack the entry with the
 * new root score.  No-op when the entry is not local (already
 * flushed).  (Lines elided in this view.)
 */
1158 vtfileflush(VtFile *f)
1164 assert(ISLOCKED(f));
1165 b = fileload(f, &e);
1166 if(!(e.flags&VtEntryLocal)){
1171 ret = flushblock(f->c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
1178 vtentrypack(&e, b->data, f->offset % f->epb);
1184 vtfileflushbefore(VtFile *r, u64int offset)
1188 int i, base, depth, ppb, epb, ok;
1189 int index[VtPointerDepth+1], index1[VtPointerDepth+1], j, ret;
1190 VtBlock *bi[VtPointerDepth+2];
1193 assert(ISLOCKED(r));
1197 b = fileload(r, &e);
1202 memset(bi, 0, sizeof bi);
1203 depth = DEPTH(e.type);
1205 i = mkindices(&e, (offset-1)/e.dsize, index);
1210 mkindices(&e, offset/e.dsize, index1);
1211 ppb = e.psize / VtScoreSize;
1212 epb = e.dsize / VtEntrySize;
1214 index[depth] = r->offset % r->epb;
1215 for(i=depth; i>=0; i--){
1216 bb = blockwalk(b, index[i], r->c, VtORDWR, &e);
1224 base = e.type&~VtTypeDepthMask;
1225 for(i=0; i<depth; i++){
1227 /* bottom: data or dir block */
1228 ok = offset%e.dsize == 0;
1230 /* middle: pointer blocks */
1233 * flush everything up to the break
1235 for(j=0; j<index[i-1]; j++)
1236 if(flushblock(r->c, nil, b->data+j*VtScoreSize, ppb, epb, base+i-1) < 0)
1239 * if the rest of the block is already flushed,
1240 * we can flush the whole block.
1244 if(vtglobaltolocal(b->data+j*VtScoreSize) != NilBlock)
1251 score = bi[i+1]->data+index[i]*VtScoreSize;
1252 if(flushblock(r->c, bi[i], score, ppb, epb, base+i) < 0)
1258 /* top: entry. do this always so that the score is up-to-date */
1259 vtentrypack(&e, bi[depth+1]->data, index[depth]);
1260 for(i=0; i<nelem(bi); i++)