Blob


1 /*
2 * Manage tree of VtFiles stored in the block cache.
3 *
4 * The single point of truth for the info about the VtFiles themselves
5 * is the block data. Because of this, there is no explicit locking of
6 * VtFile structures, and indeed there may be more than one VtFile
7 * structure for a given Venti file. They synchronize through the
8 * block cache.
9 *
10 * This is a bit simpler than fossil because there are no epochs
11 * or tags or anything else. Just mutable local blocks and immutable
12 * Venti blocks.
13 */
15 #include <u.h>
16 #include <libc.h>
17 #include <venti.h>
/* Upper bound on block numbers within a file; used by vtfilesetsize. */
#define MaxBlock (1UL<<31)

static char ENotDir[] = "walk in non-directory";
static char ETooBig[] = "file too big";
/* static char EBadAddr[] = "bad address"; */
static char ELabelMismatch[] = "label mismatch";

static int sizetodepth(uvlong s, int psize, int dsize);
static VtBlock *fileload(VtFile *r, VtEntry *e);
static int shrinkdepth(VtFile*, VtBlock*, VtEntry*, int);
static int shrinksize(VtFile*, VtEntry*, uvlong);
static int growdepth(VtFile*, VtBlock*, VtEntry*, int);

/* a VtFile is "locked" exactly when it holds its entry block in r->b */
#define ISLOCKED(r)	((r)->b != nil)
/* pointer-tree depth is encoded in the low bits of the block type */
#define DEPTH(t)	((t)&VtTypeDepthMask)
/*
 * Build an in-memory VtFile for the entry at index `offset' within the
 * directory block b, with parent p (nil for a root file, which must use
 * offset 0). Validates the unpacked entry (active, sane depth, block
 * sizes that fit the cache) and returns nil with werrstr set on failure.
 * Takes a reference on p; the returned VtFile starts with ref 1.
 */
static VtFile *
vtfilealloc(VtCache *c, VtBlock *b, VtFile *p, u32int offset, int mode)
{
	int epb;
	u32int size;
	VtEntry e;
	VtFile *r;

	assert(p==nil || ISLOCKED(p));

	if(p == nil){
		assert(offset == 0);
		epb = 1;	/* a root block holds a single entry */
	}else
		epb = p->dsize / VtEntrySize;

	if(b->type != VtDirType){
		werrstr("bad block type %#uo", b->type);
		return nil;
	}

	/*
	 * a non-active entry is the only thing that
	 * can legitimately happen here. all the others
	 * get prints.
	 */
	if(vtentryunpack(&e, b->data, offset % epb) < 0){
		fprint(2, "vtentryunpack failed: %r (%.*H)\n", VtEntrySize, b->data+VtEntrySize*(offset%epb));
		return nil;
	}
	if(!(e.flags & VtEntryActive)){
		werrstr("entry not active");
		return nil;
	}

	/* the declared depth must be able to address the declared size */
	if(DEPTH(e.type) < sizetodepth(e.size, e.psize, e.dsize)){
		fprint(2, "depth %ud size %llud psize %ud dsize %ud\n",
			DEPTH(e.type), e.size, e.psize, e.dsize);
		werrstr("bad depth");
		return nil;
	}

	size = vtcacheblocksize(c);
	if(e.dsize > size || e.psize > size){
		werrstr("block sizes %ud, %ud bigger than cache block size %ud",
			e.psize, e.dsize, size);
		return nil;
	}

	r = vtmallocz(sizeof(VtFile));
	r->c = c;
	r->mode = mode;
	r->dsize = e.dsize;
	r->psize = e.psize;
	r->gen = e.gen;
	r->dir = (e.type & VtTypeBaseMask) == VtDirType;
	r->ref = 1;
	r->parent = p;
	if(p){
		qlock(&p->lk);
		assert(mode == VtOREAD || p->mode == VtORDWR);
		p->ref++;
		qunlock(&p->lk);
	}else{
		/* root files are anchored by a local cache block */
		assert(b->addr != NilBlock);
		r->local = 1;
	}
	memmove(r->score, b->score, VtScoreSize);
	r->offset = offset;
	r->epb = epb;

	return r;
}
109 VtFile *
110 vtfileroot(VtCache *c, u32int addr, int mode)
112 VtFile *r;
113 VtBlock *b;
115 b = vtcachelocal(c, addr, VtDirType);
116 if(b == nil)
117 return nil;
118 r = vtfilealloc(c, b, nil, 0, mode);
119 vtblockput(b);
120 return r;
123 VtFile*
124 vtfileopenroot(VtCache *c, VtEntry *e)
126 VtBlock *b;
127 VtFile *f;
129 b = vtcacheallocblock(c, VtDirType);
130 if(b == nil)
131 return nil;
133 vtentrypack(e, b->data, 0);
134 f = vtfilealloc(c, b, nil, 0, VtORDWR);
135 vtblockput(b);
136 return f;
139 VtFile *
140 vtfilecreateroot(VtCache *c, int psize, int dsize, int type)
142 VtEntry e;
144 memset(&e, 0, sizeof e);
145 e.flags = VtEntryActive;
146 e.psize = psize;
147 e.dsize = dsize;
148 e.type = type;
149 memmove(e.score, vtzeroscore, VtScoreSize);
151 return vtfileopenroot(c, &e);
154 VtFile *
155 vtfileopen(VtFile *r, u32int offset, int mode)
157 ulong bn;
158 VtBlock *b;
160 assert(ISLOCKED(r));
161 if(!r->dir){
162 werrstr(ENotDir);
163 return nil;
166 bn = offset/(r->dsize/VtEntrySize);
168 b = vtfileblock(r, bn, mode);
169 if(b == nil)
170 return nil;
171 r = vtfilealloc(r->c, b, r, offset, mode);
172 vtblockput(b);
173 return r;
176 VtFile*
177 vtfilecreate(VtFile *r, int psize, int dsize, int type)
179 return _vtfilecreate(r, -1, psize, dsize, type);
/*
 * Create a new file in directory r using entry slot o, or any free slot
 * if o == -1. Searches a random directory block and then the end of the
 * directory for an inactive entry whose gen is still usable, initializes
 * it, grows the directory size if needed, and returns the opened child.
 */
VtFile*
_vtfilecreate(VtFile *r, int o, int psize, int dsize, int type)
{
	int i;
	VtBlock *b;
	u32int bn, size;
	VtEntry e;
	int epb;
	VtFile *rr;
	u32int offset;

	assert(ISLOCKED(r));
	assert(psize <= VtMaxLumpSize);
	assert(dsize <= VtMaxLumpSize);
	assert(type == VtDirType || type == VtDataType);

	if(!r->dir){
		werrstr(ENotDir);
		return nil;
	}

	epb = r->dsize/VtEntrySize;

	size = vtfilegetdirsize(r);
	/*
	 * look at a random block to see if we can find an empty entry
	 */
	if(o == -1){
		offset = lnrand(size+1);
		offset -= offset % epb;	/* round to start of block */
	}else
		offset = o;

	/* try the given block and then try the last block */
	for(;;){
		bn = offset/epb;
		b = vtfileblock(r, bn, VtORDWR);
		if(b == nil)
			return nil;
		for(i=offset%r->epb; i<epb; i++){
			if(vtentryunpack(&e, b->data, i) < 0)
				continue;
			/* gen == ~0 marks a slot that must never be reused */
			if((e.flags&VtEntryActive) == 0 && e.gen != ~0)
				goto Found;
		}
		vtblockput(b);
		if(offset == size){
			/* just appended past the end; a slot must exist there */
			fprint(2, "vtfilecreate: cannot happen\n");
			werrstr("vtfilecreate: cannot happen");
			return nil;
		}
		offset = size;
	}

Found:
	/* found an entry - gen already set */
	e.psize = psize;
	e.dsize = dsize;
	e.flags = VtEntryActive;
	e.type = type;
	e.size = 0;
	memmove(e.score, vtzeroscore, VtScoreSize);
	vtentrypack(&e, b->data, i);

	offset = bn*epb + i;
	if(offset+1 > size){
		/* entry lies past current directory end: extend the directory */
		if(vtfilesetdirsize(r, offset+1) < 0){
			vtblockput(b);
			return nil;
		}
	}

	rr = vtfilealloc(r->c, b, r, offset, VtORDWR);
	vtblockput(b);
	return rr;
}
/*
 * Shared implementation of remove (doremove!=0) and truncate (doremove==0).
 * Remove deactivates the entry entirely (bumping gen so stale references
 * fail) and releases r; truncate zeroes the size/score but keeps the
 * entry active. Returns 0 on success, -1 on error.
 */
static int
vtfilekill(VtFile *r, int doremove)
{
	VtEntry e;
	VtBlock *b;

	assert(ISLOCKED(r));
	b = fileload(r, &e);
	if(b == nil)
		return -1;

	if(doremove==0 && e.size == 0){
		/* already truncated */
		vtblockput(b);
		return 0;
	}

	if(doremove){
		/* bump gen so any outstanding references to this slot go stale */
		if(e.gen != ~0)
			e.gen++;
		e.dsize = 0;
		e.psize = 0;
		e.flags = 0;
	}else
		e.flags &= ~VtEntryLocal;
	e.type = 0;
	e.size = 0;
	memmove(e.score, vtzeroscore, VtScoreSize);
	vtentrypack(&e, b->data, r->offset % r->epb);
	vtblockput(b);

	if(doremove){
		/* removing invalidates r itself: unlock and drop our reference */
		vtfileunlock(r);
		vtfileclose(r);
	}

	return 0;
}
298 int
299 vtfileremove(VtFile *r)
301 return vtfilekill(r, 1);
304 int
305 vtfiletruncate(VtFile *r)
307 return vtfilekill(r, 0);
310 uvlong
311 vtfilegetsize(VtFile *r)
313 VtEntry e;
314 VtBlock *b;
316 assert(ISLOCKED(r));
317 b = fileload(r, &e);
318 if(b == nil)
319 return ~(uvlong)0;
320 vtblockput(b);
322 return e.size;
/*
 * Zero out the parts of the block tree for entry e that lie beyond
 * the new size. Walks down from the root, zeroing whole-subtree
 * pointers past the cut and recursing into the one partially-kept
 * subtree, then zeroes the tail of the final data block.
 * Best effort: returns -1 if a block along the path is not local.
 */
static int
shrinksize(VtFile *r, VtEntry *e, uvlong size)
{
	int i, depth, type, isdir, ppb;
	uvlong ptrsz;
	uchar score[VtScoreSize];
	VtBlock *b;

	b = vtcacheglobal(r->c, e->score, e->type);
	if(b == nil)
		return -1;

	/* ptrsz = bytes covered by one pointer at the current level */
	ptrsz = e->dsize;
	ppb = e->psize/VtScoreSize;
	type = e->type;
	depth = DEPTH(type);
	for(i=0; i+1<depth; i++)
		ptrsz *= ppb;

	isdir = r->dir;
	while(depth > 0){
		if(b->addr == NilBlock){
			/* not worth copying the block just so we can zero some of it */
			vtblockput(b);
			return -1;
		}

		/*
		 * invariant: each pointer in the tree rooted at b accounts for ptrsz bytes
		 */

		/* zero the pointers to unnecessary blocks */
		i = (size+ptrsz-1)/ptrsz;
		for(; i<ppb; i++)
			memmove(b->data+i*VtScoreSize, vtzeroscore, VtScoreSize);

		/* recurse (go around again) on the partially necessary block */
		i = size/ptrsz;
		size = size%ptrsz;
		if(size == 0){
			/* cut falls exactly on a subtree boundary: done */
			vtblockput(b);
			return 0;
		}
		ptrsz /= ppb;
		type--;
		depth--;
		memmove(score, b->data+i*VtScoreSize, VtScoreSize);
		vtblockput(b);
		b = vtcacheglobal(r->c, score, type);
		if(b == nil)
			return -1;
	}

	if(b->addr == NilBlock){
		vtblockput(b);
		return -1;
	}

	/*
	 * No one ever truncates BtDir blocks.
	 */
	if(depth==0 && !isdir && e->dsize > size)
		memset(b->data+size, 0, e->dsize-size);
	vtblockput(b);
	return 0;
}
/*
 * Set the size of file r to `size' bytes, growing or shrinking the
 * pointer-tree depth as needed. Shrinking zeroes now-unused blocks
 * (best effort: shrinksize's result is deliberately ignored).
 * Returns 0 on success, -1 on error.
 */
int
vtfilesetsize(VtFile *r, uvlong size)
{
	int depth, edepth;
	VtEntry e;
	VtBlock *b;

	assert(ISLOCKED(r));
	if(size == 0)
		return vtfiletruncate(r);

	if(size > VtMaxFileSize || size > ((uvlong)MaxBlock)*r->dsize){
		werrstr(ETooBig);
		return -1;
	}

	b = fileload(r, &e);
	if(b == nil)
		return -1;

	/* quick out */
	if(e.size == size){
		vtblockput(b);
		return 0;
	}

	depth = sizetodepth(size, e.psize, e.dsize);
	edepth = DEPTH(e.type);
	if(depth < edepth){
		if(shrinkdepth(r, b, &e, depth) < 0){
			vtblockput(b);
			return -1;
		}
	}else if(depth > edepth){
		if(growdepth(r, b, &e, depth) < 0){
			vtblockput(b);
			return -1;
		}
	}

	/* best effort; failure here only leaves stale data past the end */
	if(size < e.size)
		shrinksize(r, &e, size);

	e.size = size;
	vtentrypack(&e, b->data, r->offset % r->epb);
	vtblockput(b);

	return 0;
}
441 int
442 vtfilesetdirsize(VtFile *r, u32int ds)
444 uvlong size;
445 int epb;
447 assert(ISLOCKED(r));
448 epb = r->dsize/VtEntrySize;
450 size = (uvlong)r->dsize*(ds/epb);
451 size += VtEntrySize*(ds%epb);
452 return vtfilesetsize(r, size);
455 u32int
456 vtfilegetdirsize(VtFile *r)
458 ulong ds;
459 uvlong size;
460 int epb;
462 assert(ISLOCKED(r));
463 epb = r->dsize/VtEntrySize;
465 size = vtfilegetsize(r);
466 ds = epb*(size/r->dsize);
467 ds += (size%r->dsize)/VtEntrySize;
468 return ds;
471 int
472 vtfilegetentry(VtFile *r, VtEntry *e)
474 VtBlock *b;
476 assert(ISLOCKED(r));
477 b = fileload(r, e);
478 if(b == nil)
479 return -1;
480 vtblockput(b);
482 return 0;
485 int
486 vtfilesetentry(VtFile *r, VtEntry *e)
488 VtBlock *b;
489 VtEntry ee;
491 assert(ISLOCKED(r));
492 b = fileload(r, &ee);
493 if(b == nil)
494 return -1;
495 vtentrypack(e, b->data, r->offset % r->epb);
496 vtblockput(b);
497 return 0;
/*
 * Walk one level down the tree: from block p, follow the pointer at
 * `index' (or, for a directory block, the score in entry e) and return
 * the child block. For write modes, performs copy-on-write: a venti
 * (global) child is copied to a local block and the parent pointer and
 * entry are updated to the new local score. Returns nil on error.
 */
static VtBlock *
blockwalk(VtBlock *p, int index, VtCache *c, int mode, VtEntry *e)
{
	VtBlock *b;
	int type;
	uchar *score;
	VtEntry oe;

	switch(p->type){
	case VtDataType:
		/* data blocks have no children to walk into */
		assert(0);
	case VtDirType:
		/* p is the entry block: the child score lives in the entry */
		type = e->type;
		score = e->score;
		break;
	default:
		/* pointer block: child score is the index'th score in the data */
		type = p->type - 1;
		score = p->data+index*VtScoreSize;
		break;
	}
//print("walk from %V/%d ty %d to %V ty %d\n", p->score, index, p->type, score, type);

	if(mode == VtOWRITE && vtglobaltolocal(score) == NilBlock){
		/* whole-block overwrite of an unflushed child: allocate fresh */
		b = vtcacheallocblock(c, type);
		if(b)
			goto HaveCopy;
	}else
		b = vtcacheglobal(c, score, type);

	if(b == nil || mode == VtOREAD)
		return b;

	if(vtglobaltolocal(b->score) != NilBlock)
		return b;	/* already local: writable as-is */

	/* NOTE(review): oe is never read after this - leftover from fossil */
	oe = *e;

	/*
	 * Copy on write.
	 */
	e->flags |= VtEntryLocal;

	b = vtblockcopy(b/*, e->tag, fs->ehi, fs->elo*/);
	if(b == nil)
		return nil;

HaveCopy:
	/* record the new (local) child score in the parent */
	if(p->type == VtDirType){
		memmove(e->score, b->score, VtScoreSize);
		vtentrypack(e, p->data, index);
	}else{
		memmove(p->data+index*VtScoreSize, b->score, VtScoreSize);
	}
	return b;
}
/*
 * Change the depth of the VtFile r.
 * The entry e for r is contained in block p.
 */
/*
 * Grow the tree to `depth' by stacking new local pointer blocks on top
 * of the current root, updating e's type and score as each layer is
 * added. Returns 0 if the requested depth was reached, -1 otherwise.
 */
static int
growdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
{
	VtBlock *b, *bb;
	VtEntry oe;

	assert(ISLOCKED(r));
	assert(depth <= VtPointerDepth);

	b = vtcacheglobal(r->c, e->score, e->type);
	if(b == nil)
		return -1;

	/* NOTE(review): oe is never read after this - leftover from fossil */
	oe = *e;

	/*
	 * Keep adding layers until we get to the right depth
	 * or an error occurs.
	 */
	while(DEPTH(e->type) < depth){
		bb = vtcacheallocblock(r->c, e->type+1);
		if(bb == nil)
			break;
		/* new layer's first pointer is the old root */
		memmove(bb->data, b->score, VtScoreSize);
		memmove(e->score, bb->score, VtScoreSize);
		e->type++;
		e->flags |= VtEntryLocal;
		vtblockput(b);
		b = bb;
	}

	/* persist whatever depth we reached, even on partial failure */
	vtentrypack(e, p->data, r->offset % r->epb);
	vtblockput(b);

	if(DEPTH(e->type) == depth)
		return 0;
	return -1;
}
/*
 * Reduce the tree depth of r's entry e (contained in block p) to
 * `depth' by walking down the leftmost spine and re-rooting at the
 * appropriate level. May stop early if an intermediate block cannot
 * be loaded. Returns 0 if the requested depth was reached.
 */
static int
shrinkdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
{
	VtBlock *b, *nb, *ob, *rb;
	VtEntry oe;

	assert(ISLOCKED(r));
	assert(depth <= VtPointerDepth);

	rb = vtcacheglobal(r->c, e->score, e->type);
	if(rb == nil)
		return 0;

	/*
	 * Walk down to the new root block.
	 * We may stop early, but something is better than nothing.
	 */
	/* NOTE(review): oe is never read after this - leftover from fossil */
	oe = *e;

	ob = nil;
	b = rb;
	/* b: candidate new root; ob: b's parent */
	for(; DEPTH(e->type) > depth; e->type--){
		nb = vtcacheglobal(r->c, b->data, e->type-1);
		if(nb == nil)
			break;
		if(ob!=nil && ob!=rb)
			vtblockput(ob);
		ob = b;
		b = nb;
	}

	if(b == rb){
		/* could not descend at all: nothing to do */
		vtblockput(rb);
		return 0;
	}

	/*
	 * Right now, e points at the root block rb, b is the new root block,
	 * and ob points at b. To update:
	 *
	 *	(i) change e to point at b
	 *	(ii) zero the pointer ob -> b
	 *	(iii) free the root block
	 *
	 * p (the block containing e) must be written before
	 * anything else.
	 */

	/* (i) */
	memmove(e->score, b->score, VtScoreSize);
	vtentrypack(e, p->data, r->offset % r->epb);

	/* (ii) */
	memmove(ob->data, vtzeroscore, VtScoreSize);

	/* (iii) */
	vtblockput(rb);
	if(ob!=nil && ob!=rb)
		vtblockput(ob);
	vtblockput(b);

	if(DEPTH(e->type) == depth)
		return 0;
	return -1;
}
665 static int
666 mkindices(VtEntry *e, u32int bn, int *index)
668 int i, np;
670 memset(index, 0, (VtPointerDepth+1)*sizeof(int));
672 np = e->psize/VtScoreSize;
673 for(i=0; bn > 0; i++){
674 if(i >= VtPointerDepth){
675 werrstr("bad address 0x%lux", (ulong)bn);
676 return -1;
678 index[i] = bn % np;
679 bn /= np;
681 return i;
684 VtBlock *
685 vtfileblock(VtFile *r, u32int bn, int mode)
687 VtBlock *b, *bb;
688 int index[VtPointerDepth+1];
689 VtEntry e;
690 int i;
691 int m;
693 assert(ISLOCKED(r));
694 assert(bn != NilBlock);
696 b = fileload(r, &e);
697 if(b == nil)
698 return nil;
700 i = mkindices(&e, bn, index);
701 if(i < 0)
702 return nil;
703 if(i > DEPTH(e.type)){
704 if(mode == VtOREAD){
705 werrstr("bad address 0x%lux", (ulong)bn);
706 goto Err;
708 index[i] = 0;
709 if(growdepth(r, b, &e, i) < 0)
710 goto Err;
713 assert(b->type == VtDirType);
715 index[DEPTH(e.type)] = r->offset % r->epb;
717 /* mode for intermediate block */
718 m = mode;
719 if(m == VtOWRITE)
720 m = VtORDWR;
722 for(i=DEPTH(e.type); i>=0; i--){
723 bb = blockwalk(b, index[i], r->c, i==0 ? mode : m, &e);
724 if(bb == nil)
725 goto Err;
726 vtblockput(b);
727 b = bb;
729 return b;
730 Err:
731 vtblockput(b);
732 return nil;
/*
 * Fetch the venti score for block bn of file r without loading the
 * data block itself: walk read-only down to the bottom pointer level
 * and copy out the leaf score. Blocks beyond the tree depth report
 * the zero score. Returns 0 on success, -1 on error.
 */
int
vtfileblockscore(VtFile *r, u32int bn, uchar score[VtScoreSize])
{
	VtBlock *b, *bb;
	int index[VtPointerDepth+1];
	VtEntry e;
	int i;

	assert(ISLOCKED(r));
	assert(bn != NilBlock);

	b = fileload(r, &e);
	if(b == nil)
		return -1;

	i = mkindices(&e, bn, index);
	if(i < 0){
		vtblockput(b);
		return -1;
	}
	if(i > DEPTH(e.type)){
		/* past the end of the tree: logically zero */
		memmove(score, vtzeroscore, VtScoreSize);
		vtblockput(b);
		return 0;
	}

	index[DEPTH(e.type)] = r->offset % r->epb;

	/* stop at level 1: the leaf score is read from its parent */
	for(i=DEPTH(e.type); i>=1; i--){
		bb = blockwalk(b, index[i], r->c, VtOREAD, &e);
		if(bb == nil)
			goto Err;
		vtblockput(b);
		b = bb;
		/* a zero subtree means every score below is zero too */
		if(memcmp(b->score, vtzeroscore, VtScoreSize) == 0)
			break;
	}

	memmove(score, b->data+index[0]*VtScoreSize, VtScoreSize);
	vtblockput(b);
	return 0;

Err:
	vtblockput(b);
	return -1;
}
782 void
783 vtfileincref(VtFile *r)
785 qlock(&r->lk);
786 r->ref++;
787 qunlock(&r->lk);
/*
 * Drop a reference to r, freeing it (and releasing the reference it
 * holds on its parent) when the count reaches zero. nil is a no-op.
 */
void
vtfileclose(VtFile *r)
{
	if(r == nil)
		return;
	qlock(&r->lk);
	r->ref--;
	if(r->ref){
		qunlock(&r->lk);
		return;
	}
	assert(r->ref == 0);
	qunlock(&r->lk);
	if(r->parent)
		vtfileclose(r->parent);
	/* poison the structure so use-after-free is caught quickly */
	memset(r, ~0, sizeof(*r));
	vtfree(r);
}
/*
 * Retrieve the block containing the entry for r.
 * If a snapshot has happened, we might need
 * to get a new copy of the block. We avoid this
 * in the common case by caching the score for
 * the block and the last epoch in which it was valid.
 *
 * We use r->mode to tell the difference between active
 * file system VtFiles (VtORDWR) and VtFiles for the
 * snapshot file system (VtOREAD).
 */
static VtBlock*
fileloadblock(VtFile *r, int mode)
{
	char e[ERRMAX];
	u32int addr;
	VtBlock *b;

	switch(r->mode){
	default:
		assert(0);
	case VtORDWR:
		assert(r->mode == VtORDWR);
		if(r->local == 1){
			/* cached score is known-good: use it directly */
			b = vtcacheglobal(r->c, r->score, VtDirType);
			if(b == nil)
				return nil;
			return b;
		}
		/* first write access: materialize a local copy via the parent */
		assert(r->parent != nil);
		if(vtfilelock(r->parent, VtORDWR) < 0)
			return nil;
		b = vtfileblock(r->parent, r->offset/r->epb, VtORDWR);
		vtfileunlock(r->parent);
		if(b == nil)
			return nil;
		memmove(r->score, b->score, VtScoreSize);
		r->local = 1;
		return b;

	case VtOREAD:
		if(mode == VtORDWR){
			werrstr("read/write lock of read-only file");
			return nil;
		}
		addr = vtglobaltolocal(r->score);
		if(addr == NilBlock)
			return vtcacheglobal(r->c, r->score, VtDirType);

		b = vtcachelocal(r->c, addr, VtDirType);
		if(b)
			return b;

		/*
		 * If it failed because the epochs don't match, the block has been
		 * archived and reclaimed. Rewalk from the parent and get the
		 * new pointer. This can't happen in the VtORDWR case
		 * above because blocks in the current epoch don't get
		 * reclaimed. The fact that we're VtOREAD means we're
		 * a snapshot. (Or else the file system is read-only, but then
		 * the archiver isn't going around deleting blocks.)
		 */
		rerrstr(e, sizeof e);
		if(strcmp(e, ELabelMismatch) == 0){
			if(vtfilelock(r->parent, VtOREAD) < 0)
				return nil;
			b = vtfileblock(r->parent, r->offset/r->epb, VtOREAD);
			vtfileunlock(r->parent);
			if(b){
				fprint(2, "vtfilealloc: lost %V found %V\n",
					r->score, b->score);
				memmove(r->score, b->score, VtScoreSize);
				return b;
			}
		}
		return nil;
	}
}
/*
 * Lock r by loading and holding its entry block (mode -1 means
 * "use r's own mode"). Returns 0 on success, -1 on error.
 */
int
vtfilelock(VtFile *r, int mode)
{
	VtBlock *b;

	if(mode == -1)
		mode = r->mode;

	b = fileloadblock(r, mode);
	if(b == nil)
		return -1;
	/*
	 * The fact that we are holding b serves as the
	 * lock entitling us to write to r->b.
	 */
	assert(r->b == nil);
	r->b = b;
	return 0;
}
/*
 * Lock two (usually sibling) VtFiles. This needs special care
 * because the Entries for both vtFiles might be in the same block.
 * We also try to lock blocks in left-to-right order within the tree.
 */
int
vtfilelock2(VtFile *r, VtFile *rr, int mode)
{
	VtBlock *b, *bb;

	if(rr == nil)
		return vtfilelock(r, mode);

	if(mode == -1)
		mode = r->mode;

	if(r->parent==rr->parent && r->offset/r->epb == rr->offset/rr->epb){
		/* same entry block: load once, hold two references */
		b = fileloadblock(r, mode);
		if(b == nil)
			return -1;
		vtblockduplock(b);
		bb = b;
	}else if(r->parent==rr->parent || r->offset > rr->offset){
		/* rr is to the left of r: lock rr first */
		bb = fileloadblock(rr, mode);
		b = fileloadblock(r, mode);
	}else{
		b = fileloadblock(r, mode);
		bb = fileloadblock(rr, mode);
	}
	if(b == nil || bb == nil){
		if(b)
			vtblockput(b);
		if(bb)
			vtblockput(bb);
		return -1;
	}

	/*
	 * The fact that we are holding b and bb serves
	 * as the lock entitling us to write to r->b and rr->b.
	 */
	r->b = b;
	rr->b = bb;
	return 0;
}
954 void
955 vtfileunlock(VtFile *r)
957 VtBlock *b;
959 if(r->b == nil){
960 fprint(2, "vtfileunlock: already unlocked\n");
961 abort();
963 b = r->b;
964 r->b = nil;
965 vtblockput(b);
/*
 * Unpack r's entry into *e and return r's (already locked) entry block
 * with an extra reference; the caller must vtblockput it. Returns nil
 * if the entry fails to unpack.
 */
static VtBlock*
fileload(VtFile *r, VtEntry *e)
{
	VtBlock *b;

	assert(ISLOCKED(r));
	b = r->b;
	if(vtentryunpack(e, b->data, r->offset % r->epb) < 0)
		return nil;
	vtblockduplock(b);
	return b;
}
981 static int
982 sizetodepth(uvlong s, int psize, int dsize)
984 int np;
985 int d;
987 /* determine pointer depth */
988 np = psize/VtScoreSize;
989 s = (s + dsize - 1)/dsize;
990 for(d = 0; s > 1; d++)
991 s = (s + np - 1)/np;
992 return d;
995 long
996 vtfileread(VtFile *f, void *data, long count, vlong offset)
998 int frag;
999 VtBlock *b;
1000 VtEntry e;
1002 assert(ISLOCKED(f));
1004 vtfilegetentry(f, &e);
1005 if(count == 0)
1006 return 0;
1007 if(count < 0 || offset < 0){
1008 werrstr("vtfileread: bad offset or count");
1009 return -1;
1011 if(offset >= e.size)
1012 return 0;
1014 if(offset+count > e.size)
1015 count = e.size - offset;
1017 frag = offset % e.dsize;
1018 if(frag+count > e.dsize)
1019 count = e.dsize - frag;
1021 b = vtfileblock(f, offset/e.dsize, VtOREAD);
1022 if(b == nil)
1023 return -1;
1025 memmove(data, b->data+frag, count);
1026 vtblockput(b);
1027 return count;
1030 static long
1031 filewrite1(VtFile *f, void *data, long count, vlong offset)
1033 int frag, m;
1034 VtBlock *b;
1035 VtEntry e;
1037 vtfilegetentry(f, &e);
1038 if(count < 0 || offset < 0){
1039 werrstr("vtfilewrite: bad offset or count");
1040 return -1;
1043 frag = offset % e.dsize;
1044 if(frag+count > e.dsize)
1045 count = e.dsize - frag;
1047 m = VtORDWR;
1048 if(frag == 0 && count == e.dsize)
1049 m = VtOWRITE;
1051 b = vtfileblock(f, offset/e.dsize, m);
1052 if(b == nil)
1053 return -1;
1055 memmove(b->data+frag, data, count);
1057 if(offset+count > e.size){
1058 vtfilegetentry(f, &e);
1059 e.size = offset+count;
1060 vtfilesetentry(f, &e);
1063 vtblockput(b);
1064 return count;
1067 long
1068 vtfilewrite(VtFile *f, void *data, long count, vlong offset)
1070 long tot, m;
1072 assert(ISLOCKED(f));
1074 tot = 0;
1075 m = 0;
1076 while(tot < count){
1077 m = filewrite1(f, (char*)data+tot, count-tot, offset+tot);
1078 if(m <= 0)
1079 break;
1080 tot += m;
1082 if(tot==0)
1083 return m;
1084 return tot;
1087 static int
1088 flushblock(VtCache *c, VtBlock *bb, uchar score[VtScoreSize], int ppb, int epb,
1089 int type)
1091 u32int addr;
1092 VtBlock *b;
1093 VtEntry e;
1094 int i;
1096 addr = vtglobaltolocal(score);
1097 if(addr == NilBlock)
1098 return 0;
1100 if(bb){
1101 b = bb;
1102 if(memcmp(b->score, score, VtScoreSize) != 0)
1103 abort();
1104 }else
1105 if((b = vtcachelocal(c, addr, type)) == nil)
1106 return -1;
1108 switch(type){
1109 case VtDataType:
1110 break;
1112 case VtDirType:
1113 for(i=0; i<epb; i++){
1114 if(vtentryunpack(&e, b->data, i) < 0)
1115 goto Err;
1116 if(flushblock(c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
1117 e.type) < 0)
1118 goto Err;
1120 break;
1122 default: /* VtPointerTypeX */
1123 for(i=0; i<ppb; i++){
1124 if(flushblock(c, nil, b->data+VtScoreSize*i, ppb, epb, type-1) < 0)
1125 goto Err;
1127 break;
1130 if(vtblockwrite(b) < 0)
1131 goto Err;
1132 memmove(score, b->score, VtScoreSize);
1133 if(b != bb)
1134 vtblockput(b);
1135 return 0;
1137 Err:
1138 if(b != bb)
1139 vtblockput(b);
1140 return -1;
1143 int
1144 vtfileflush(VtFile *f)
1146 int ret;
1147 VtBlock *b;
1148 VtEntry e;
1150 assert(ISLOCKED(f));
1151 b = fileload(f, &e);
1152 if(!(e.flags&VtEntryLocal)){
1153 vtblockput(b);
1154 return 0;
1157 ret = flushblock(f->c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
1158 e.type);
1159 if(ret < 0){
1160 vtblockput(b);
1161 return -1;
1164 vtentrypack(&e, b->data, f->offset % f->epb);
1165 vtblockput(b);
1166 return 0;
/*
 * Flush to venti every block of r that lies entirely before `offset',
 * for sequential writers that want to keep the cache footprint small.
 * Walks the path to the last written byte, flushes finished siblings
 * at each level, and flushes a level itself once its last child is
 * global. Always re-packs the entry so its score stays current.
 * Returns 0 on success, -1 on error.
 */
int
vtfileflushbefore(VtFile *r, u64int offset)
{
	VtBlock *b, *bb;
	VtEntry e;
	int i, base, depth, ppb, epb, doflush;
	int index[VtPointerDepth+1], j, ret;
	VtBlock *bi[VtPointerDepth+2];
	uchar *score;

	assert(ISLOCKED(r));
	if(offset == 0)
		return 0;

	b = fileload(r, &e);
	if(b == nil)
		return -1;

	/*
	 * compute path through tree for the last written byte and the next one.
	 */
	ret = -1;
	memset(bi, 0, sizeof bi);
	depth = DEPTH(e.type);
	bi[depth+1] = b;	/* top slot holds the entry block */
	i = mkindices(&e, (offset-1)/e.dsize, index);
	if(i < 0)
		goto Err;
	if(i > depth)
		goto Err;
	ppb = e.psize / VtScoreSize;
	epb = e.dsize / VtEntrySize;

	/*
	 * load the blocks along the last written byte
	 */
	index[depth] = r->offset % r->epb;
	for(i=depth; i>=0; i--){
		bb = blockwalk(b, index[i], r->c, VtORDWR, &e);
		if(bb == nil)
			goto Err;
		bi[i] = bb;
		b = bb;
	}
	ret = 0;

	/*
	 * walk up the path from leaf to root, flushing anything that
	 * has been finished.
	 */
	base = e.type&~VtTypeDepthMask;
	for(i=0; i<=depth; i++){
		doflush = 0;
		if(i == 0){
			/* leaf: data or dir block */
			if(offset%e.dsize == 0)
				doflush = 1;
		}else{
			/*
			 * interior node: pointer blocks.
			 * specifically, b = bi[i] is a block whose index[i-1]'th entry
			 * points at bi[i-1].
			 */
			b = bi[i];

			/*
			 * the index entries up to but not including index[i-1] point at
			 * finished blocks, so flush them for sure.
			 */
			for(j=0; j<index[i-1]; j++)
				if(flushblock(r->c, nil, b->data+j*VtScoreSize, ppb, epb, base+i-1) < 0)
					goto Err;

			/*
			 * if index[i-1] is the last entry in the block and is global
			 * (i.e. the kid is flushed), then we can flush this block.
			 */
			if(j==ppb-1 && vtglobaltolocal(b->data+j*VtScoreSize)==NilBlock)
				doflush = 1;
		}
		if(doflush){
			if(i == depth)
				score = e.score;
			else
				score = bi[i+1]->data+index[i]*VtScoreSize;
			if(flushblock(r->c, bi[i], score, ppb, epb, base+i) < 0)
				goto Err;
		}
	}

Err:
	/* top: entry. do this always so that the score is up-to-date */
	vtentrypack(&e, bi[depth+1]->data, index[depth]);
	for(i=0; i<nelem(bi); i++)
		if(bi[i])
			vtblockput(bi[i]);
	return ret;
}