

#include "stdinc.h"
#include "dat.h"
#include "fns.h"
#include "error.h"
/*
 * Lock watcher. Check that locking of blocks always proceeds
 * down the tree (a parent is locked before its children).
 *
 * This is REALLY slow, and it won't work when the blocks aren't
 * arranged in a tree (e.g., after the first snapshot). But it's great
 * for debugging.
 */
enum
{
	MaxLock = 16,		/* max blocks one thread may hold at once */
	HashSize = 1009,	/* buckets in the parent/child hash tables */
};
19 /*
20 * Thread-specific watch state.
21 */
22 typedef struct WThread WThread;
23 struct WThread
24 {
25 Block *b[MaxLock]; /* blocks currently held */
26 uint nb;
27 uint pid;
28 };
30 typedef struct WMap WMap;
31 typedef struct WEntry WEntry;
33 struct WEntry
34 {
35 uchar c[VtScoreSize];
36 uchar p[VtScoreSize];
37 int off;
39 WEntry *cprev;
40 WEntry *cnext;
41 WEntry *pprev;
42 WEntry *pnext;
43 };
45 struct WMap
46 {
47 QLock lk;
49 WEntry *hchild[HashSize];
50 WEntry *hparent[HashSize];
51 };
53 static WMap map;
54 static void **wp;
55 static uint blockSize;
56 static WEntry *pool;
57 uint bwatchDisabled;
59 static uint
60 hash(uchar score[VtScoreSize])
61 {
62 uint i, h;
64 h = 0;
65 for(i=0; i<VtScoreSize; i++)
66 h = h*37 + score[i];
67 return h%HashSize;
68 }
70 #include <pool.h>
71 static void
72 freeWEntry(WEntry *e)
73 {
74 memset(e, 0, sizeof(WEntry));
75 e->pnext = pool;
76 pool = e;
77 }
79 static WEntry*
80 allocWEntry(void)
81 {
82 int i;
83 WEntry *w;
85 w = pool;
86 if(w == nil){
87 w = vtmallocz(1024*sizeof(WEntry));
88 for(i=0; i<1024; i++)
89 freeWEntry(&w[i]);
90 w = pool;
91 }
92 pool = w->pnext;
93 memset(w, 0, sizeof(WEntry));
94 return w;
95 }
97 /*
98 * remove all dependencies with score as a parent
99 */
100 static void
101 _bwatchResetParent(uchar *score)
103 WEntry *w, *next;
104 uint h;
106 h = hash(score);
107 for(w=map.hparent[h]; w; w=next){
108 next = w->pnext;
109 if(memcmp(w->p, score, VtScoreSize) == 0){
110 if(w->pnext)
111 w->pnext->pprev = w->pprev;
112 if(w->pprev)
113 w->pprev->pnext = w->pnext;
114 else
115 map.hparent[h] = w->pnext;
116 if(w->cnext)
117 w->cnext->cprev = w->cprev;
118 if(w->cprev)
119 w->cprev->cnext = w->cnext;
120 else
121 map.hchild[hash(w->c)] = w->cnext;
122 freeWEntry(w);
126 /*
127 * and child
128 */
129 static void
130 _bwatchResetChild(uchar *score)
132 WEntry *w, *next;
133 uint h;
135 h = hash(score);
136 for(w=map.hchild[h]; w; w=next){
137 next = w->cnext;
138 if(memcmp(w->c, score, VtScoreSize) == 0){
139 if(w->pnext)
140 w->pnext->pprev = w->pprev;
141 if(w->pprev)
142 w->pprev->pnext = w->pnext;
143 else
144 map.hparent[hash(w->p)] = w->pnext;
145 if(w->cnext)
146 w->cnext->cprev = w->cprev;
147 if(w->cprev)
148 w->cprev->cnext = w->cnext;
149 else
150 map.hchild[h] = w->cnext;
151 freeWEntry(w);
156 static uchar*
157 parent(uchar c[VtScoreSize], int *off)
159 WEntry *w;
160 uint h;
162 h = hash(c);
163 for(w=map.hchild[h]; w; w=w->cnext)
164 if(memcmp(w->c, c, VtScoreSize) == 0){
165 *off = w->off;
166 return w->p;
168 return nil;
171 static void
172 addChild(uchar p[VtEntrySize], uchar c[VtEntrySize], int off)
174 uint h;
175 WEntry *w;
177 w = allocWEntry();
178 memmove(w->p, p, VtScoreSize);
179 memmove(w->c, c, VtScoreSize);
180 w->off = off;
182 h = hash(p);
183 w->pnext = map.hparent[h];
184 if(w->pnext)
185 w->pnext->pprev = w;
186 map.hparent[h] = w;
188 h = hash(c);
189 w->cnext = map.hchild[h];
190 if(w->cnext)
191 w->cnext->cprev = w;
192 map.hchild[h] = w;
195 void
196 bwatchReset(uchar score[VtScoreSize])
198 qlock(&map.lk);
199 _bwatchResetParent(score);
200 _bwatchResetChild(score);
201 qunlock(&map.lk);
204 void
205 bwatchInit(void)
207 wp = privalloc();
208 *wp = nil;
211 void
212 bwatchSetBlockSize(uint bs)
214 blockSize = bs;
217 static WThread*
218 getWThread(void)
220 WThread *w;
222 w = *wp;
223 if(w == nil || w->pid != getpid()){
224 w = vtmallocz(sizeof(WThread));
225 *wp = w;
226 w->pid = getpid();
228 return w;
231 /*
232 * Derive dependencies from the contents of b.
233 */
234 void
235 bwatchDependency(Block *b)
237 int i, epb, ppb;
238 Entry e;
240 if(bwatchDisabled)
241 return;
243 qlock(&map.lk);
244 _bwatchResetParent(b->score);
246 switch(b->l.type){
247 case BtData:
248 break;
250 case BtDir:
251 epb = blockSize / VtEntrySize;
252 for(i=0; i<epb; i++){
253 entryUnpack(&e, b->data, i);
254 if(!(e.flags & VtEntryActive))
255 continue;
256 addChild(b->score, e.score, i);
258 break;
260 default:
261 ppb = blockSize / VtScoreSize;
262 for(i=0; i<ppb; i++)
263 addChild(b->score, b->data+i*VtScoreSize, i);
264 break;
266 qunlock(&map.lk);
269 static int
270 depth(uchar *s)
272 int d, x;
274 d = -1;
275 while(s){
276 d++;
277 s = parent(s, &x);
279 return d;
282 static int
283 lockConflicts(uchar xhave[VtScoreSize], uchar xwant[VtScoreSize])
285 uchar *have, *want;
286 int havedepth, wantdepth, havepos, wantpos;
288 have = xhave;
289 want = xwant;
291 havedepth = depth(have);
292 wantdepth = depth(want);
294 /*
295 * walk one or the other up until they're both
296 * at the same level.
297 */
298 havepos = -1;
299 wantpos = -1;
300 have = xhave;
301 want = xwant;
302 while(wantdepth > havedepth){
303 wantdepth--;
304 want = parent(want, &wantpos);
306 while(havedepth > wantdepth){
307 havedepth--;
308 have = parent(have, &havepos);
311 /*
312 * walk them up simultaneously until we reach
313 * a common ancestor.
314 */
315 while(have && want && memcmp(have, want, VtScoreSize) != 0){
316 have = parent(have, &havepos);
317 want = parent(want, &wantpos);
320 /*
321 * not part of same tree. happens mainly with
322 * newly allocated blocks.
323 */
324 if(!have || !want)
325 return 0;
327 /*
328 * never walked want: means we want to lock
329 * an ancestor of have. no no.
330 */
331 if(wantpos == -1)
332 return 1;
334 /*
335 * never walked have: means we want to lock a
336 * child of have. that's okay.
337 */
338 if(havepos == -1)
339 return 0;
341 /*
342 * walked both: they're from different places in the tree.
343 * require that the left one be locked before the right one.
344 * (this is questionable, but it puts a total order on the block tree).
345 */
346 return havepos < wantpos;
349 static void
350 stop(void)
352 int fd;
353 char buf[32];
355 snprint(buf, sizeof buf, "#p/%d/ctl", getpid());
356 fd = open(buf, OWRITE);
357 write(fd, "stop", 4);
358 close(fd);
361 /*
362 * Check whether the calling thread can validly lock b.
363 * That is, check that the calling thread doesn't hold
364 * locks for any of b's children.
365 */
366 void
367 bwatchLock(Block *b)
369 int i;
370 WThread *w;
372 if(bwatchDisabled)
373 return;
375 if(b->part != PartData)
376 return;
378 qlock(&map.lk);
379 w = getWThread();
380 for(i=0; i<w->nb; i++){
381 if(lockConflicts(w->b[i]->score, b->score)){
382 fprint(2, "%d: have block %V; shouldn't lock %V\n",
383 w->pid, w->b[i]->score, b->score);
384 stop();
387 qunlock(&map.lk);
388 if(w->nb >= MaxLock){
389 fprint(2, "%d: too many blocks held\n", w->pid);
390 stop();
391 }else
392 w->b[w->nb++] = b;
395 /*
396 * Note that the calling thread is about to unlock b.
397 */
398 void
399 bwatchUnlock(Block *b)
401 int i;
402 WThread *w;
404 if(bwatchDisabled)
405 return;
407 if(b->part != PartData)
408 return;
410 w = getWThread();
411 for(i=0; i<w->nb; i++)
412 if(w->b[i] == b)
413 break;
414 if(i>=w->nb){
415 fprint(2, "%d: unlock of unlocked block %V\n", w->pid, b->score);
416 stop();
417 }else
418 w->b[i] = w->b[--w->nb];