 * Lock watcher. Check that locking of blocks is always down.
 * This is REALLY slow, and it won't work when the blocks aren't
 * arranged in a tree (e.g., after the first snapshot). But it's great
 * Thread-specific watch state.
/* Per-thread lock-watch record; tracks which blocks this thread holds. */
/* NOTE(review): fragmentary excerpt — the rest of the struct body is elided. */
22 typedef struct WThread WThread;
25 Block *b[MaxLock]; /* blocks currently held */
/*
 * Global dependency map: each WEntry records one parent->child score edge,
 * hashed both ways so edges can be found by parent or by child.
 * NOTE(review): fragmentary excerpt — other WMap/WEntry fields are elided.
 */
30 typedef struct WMap WMap;
31 typedef struct WEntry WEntry;
/* chains keyed by child score (hash(w->c)) */
49 WEntry *hchild[HashSize];
/* chains keyed by parent score (hash(w->p)) */
50 WEntry *hparent[HashSize];
55 static uint blockSize;
/*
 * Hash a venti score into [0, HashSize) by folding all VtScoreSize bytes.
 * NOTE(review): fragmentary excerpt — the accumulator update and the
 * WEntry alloc/free bodies between these lines are elided.
 */
60 hash(uchar score[VtScoreSize])
65 for(i=0; i<VtScoreSize; i++)
/* clear a recycled WEntry before putting it back on the free list (presumably freeWEntry — confirm) */
74 memset(e, 0, sizeof(WEntry));
/* free list empty: grab a batch of 1024 zeroed entries at once (vtmallocz aborts on OOM, so no nil check) */
87 w = vtmallocz(1024*sizeof(WEntry));
/* scrub the entry handed out so stale links never leak into the hash chains */
93 memset(w, 0, sizeof(WEntry));
98 * remove all dependencies with score as a parent
/* Walks the hparent chain for hash(score), unlinking every matching entry
 * from BOTH hash chains (parent chain here, child chain via hash(w->c)).
 * NOTE(review): fragmentary excerpt — the nil checks / else arms that pair
 * with these unlink statements are elided; do not assume order from here. */
101 _bwatchResetParent(uchar *score)
107 for(w=map.hparent[h]; w; w=next){
109 if(memcmp(w->p, score, VtScoreSize) == 0){
/* unlink from doubly-linked parent-hash chain */
111 w->pnext->pprev = w->pprev;
113 w->pprev->pnext = w->pnext;
/* no pprev: entry was the chain head */
115 map.hparent[h] = w->pnext;
/* unlink from doubly-linked child-hash chain as well */
117 w->cnext->cprev = w->cprev;
119 w->cprev->cnext = w->cnext;
121 map.hchild[hash(w->c)] = w->cnext;
/*
 * Remove all dependencies with score as a child — mirror image of
 * _bwatchResetParent: scan the hchild chain, unlink matches from both chains.
 * NOTE(review): fragmentary excerpt — guard/else lines between these
 * statements are elided.
 */
130 _bwatchResetChild(uchar *score)
136 for(w=map.hchild[h]; w; w=next){
138 if(memcmp(w->c, score, VtScoreSize) == 0){
/* unlink from the parent-hash chain (head case uses hash(w->p)) */
140 w->pnext->pprev = w->pprev;
142 w->pprev->pnext = w->pnext;
144 map.hparent[hash(w->p)] = w->pnext;
/* unlink from the child-hash chain (head case uses h) */
146 w->cnext->cprev = w->cprev;
148 w->cprev->cnext = w->cnext;
150 map.hchild[h] = w->cnext;
/*
 * Look up the recorded parent of child score c.
 * Scans the child-hash chain for an exact score match; *off presumably
 * receives the child's slot within the parent (set in the elided body —
 * confirm). Return value (parent score vs nil) is elided from this view.
 */
157 parent(uchar c[VtScoreSize], int *off)
163 for(w=map.hchild[h]; w; w=w->cnext)
164 if(memcmp(w->c, c, VtScoreSize) == 0){
/*
 * Record a parent->child edge: allocate a WEntry, copy both scores,
 * and push it onto the head of both hash chains.
 * NOTE(review): parameters are declared with VtEntrySize but only
 * VtScoreSize bytes are copied — the array sizes look like a typo for
 * VtScoreSize (harmless since array parameters decay to pointers), confirm
 * against upstream.
 * NOTE(review): fragmentary excerpt — the prev-pointer fixups and the two
 * hash computations between these lines are elided.
 */
172 addChild(uchar p[VtEntrySize], uchar c[VtEntrySize], int off)
178 memmove(w->p, p, VtScoreSize);
179 memmove(w->c, c, VtScoreSize);
/* push onto head of parent-hash chain */
183 w->pnext = map.hparent[h];
/* push onto head of child-hash chain (h presumably recomputed as hash(c) in elided code) */
189 w->cnext = map.hchild[h];
/*
 * Forget everything known about score: drop every dependency edge in which
 * it appears, whether as parent or as child.
 */
196 bwatchReset(uchar score[VtScoreSize])
199 _bwatchResetParent(score);
200 _bwatchResetChild(score);
/* Record the block size used to derive entries/pointers per block. */
212 bwatchSetBlockSize(uint bs)
/*
 * Fetch (or lazily create) the calling thread's WThread.
 * NOTE(review): fragmentary excerpt — the per-thread lookup preceding this
 * pid check is elided; comparing cached pid against getpid() suggests the
 * record is invalidated across fork/exec, confirm.
 */
223 if(w == nil || w->pid != getpid()){
224 w = vtmallocz(sizeof(WThread));
232 * Derive dependencies from the contents of b.
/*
 * Re-derives b's parent->child edges: clears old edges where b is the
 * parent, then records one edge per child reference found in b's data,
 * interpreted either as packed VtEntry records or as raw score pointers
 * (the branch selecting between the two is elided from this view).
 */
235 bwatchDependency(Block *b)
/* drop stale edges before re-adding from current contents */
244 _bwatchResetParent(b->score);
/* entry-block case: blockSize/VtEntrySize entries per block */
251 epb = blockSize / VtEntrySize;
252 for(i=0; i<epb; i++){
253 entryUnpack(&e, b->data, i);
/* skip inactive entries (the continue/skip statement itself is elided) */
254 if(!(e.flags & VtEntryActive))
256 addChild(b->score, e.score, i);
/* pointer-block case: blockSize/VtScoreSize raw scores per block */
261 ppb = blockSize / VtScoreSize;
263 addChild(b->score, b->data+i*VtScoreSize, i);
/*
 * Decide whether locking the block with score xwant, while already holding
 * the block with score xhave, violates the lock order. Works by walking both
 * scores up the recorded parent edges to a common ancestor.
 * NOTE(review): fragmentary excerpt — the copies of xhave/xwant into
 * have/want, the depth() helper, and several early returns are elided.
 */
283 lockConflicts(uchar xhave[VtScoreSize], uchar xwant[VtScoreSize])
286 int havedepth, wantdepth, havepos, wantpos;
291 havedepth = depth(have);
292 wantdepth = depth(want);
295 * walk one or the other up until they're both
/* equalize depths: lift the deeper score toward the root */
302 while(wantdepth > havedepth){
304 want = parent(want, &wantpos);
306 while(havedepth > wantdepth){
308 have = parent(have, &havepos);
312 * walk them up simultaneously until we reach
/* lift both in lockstep until scores match (common ancestor) or a walk falls off the tree */
315 while(have && want && memcmp(have, want, VtScoreSize) != 0){
316 have = parent(have, &havepos);
317 want = parent(want, &wantpos);
321 * not part of same tree. happens mainly with
322 * newly allocated blocks.
328 * never walked want: means we want to lock
329 * an ancestor of have. no no.
335 * never walked have: means we want to lock a
336 * child of have. that's okay.
342 * walked both: they're from different places in the tree.
343 * require that the left one be locked before the right one.
344 * (this is questionable, but it puts a total order on the block tree).
/* NOTE(review): comment says left must be locked first, which would make the
 * conflict condition havepos >= wantpos; "havepos < wantpos" looks inverted
 * relative to that rule — confirm against upstream before changing. */
346 return havepos < wantpos;
/*
 * Suspend this process via the Plan 9 proc ctl file (#p/<pid>/ctl) so a
 * debugger can attach at the point a lock violation was detected.
 * NOTE(review): open's return is not checked; on failure write(-1, ...) is a
 * silent no-op — probably intentional best-effort, confirm.
 */
355 snprint(buf, sizeof buf, "#p/%d/ctl", getpid());
356 fd = open(buf, OWRITE);
357 write(fd, "stop", 4);
362 * Check whether the calling thread can validly lock b.
363 * That is, check that the calling thread doesn't hold
364 * locks for any of b's children.
/* only data-partition blocks participate in the watch; others pass through */
375 if(b->part != PartData)
/* compare b against every block this thread already holds */
380 for(i=0; i<w->nb; i++){
381 if(lockConflicts(w->b[i]->score, b->score)){
/* report the violation (%V formats a venti score); the stop/abort action is elided */
382 fprint(2, "%d: have block %V; shouldn't lock %V\n",
383 w->pid, w->b[i]->score, b->score);
/* b would be recorded past the end of w->b[MaxLock]; complain instead */
388 if(w->nb >= MaxLock){
389 fprint(2, "%d: too many blocks held\n", w->pid);
396 * Note that the calling thread is about to unlock b.
399 bwatchUnlock(Block *b)
/* non-data blocks were never recorded; nothing to remove */
407 if(b->part != PartData)
/* find b in this thread's held-block list (match test elided from this view) */
411 for(i=0; i<w->nb; i++)
415 fprint(2, "%d: unlock of unlocked block %V\n", w->pid, b->score);
/* O(1) unordered removal: overwrite slot i with the last held block */
418 w->b[i] = w->b[--w->nb];