#include "stdinc.h"
#include "dat.h"
#include "fns.h"

/* define CHECK(x) as x to enable the expensive lump cache consistency checks */
/* #define CHECK(x) x */
#define CHECK(x)

typedef struct LumpCache LumpCache;

enum
{
	HashLog		= 9,
	HashSize	= 1<<HashLog,
	HashMask	= HashSize - 1,
};
struct LumpCache
{
	QLock		lock;
	Rendez		full;
	Lump		*free;		/* list of available lumps */
	u32int		allowed;	/* total allowable space for packets */
	u32int		avail;		/* remaining space for packets */
	u32int		now;		/* ticks for usage timestamps */
	Lump		**heads;	/* hash table for finding addresses */
	int		nheap;		/* number of available victims */
	Lump		**heap;		/* heap for locating victims */
	int		nblocks;	/* number of blocks allocated */
	Lump		*blocks;	/* array of block descriptors */
};
static LumpCache	lumpcache;

static void	delheap(Lump *db);
static int	downheap(int i, Lump *b);
static void	fixheap(int i, Lump *b);
static int	upheap(int i, Lump *b);
static Lump	*bumplump(void);
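
/*
 * set up the cache: allocate the hash table, the victim heap,
 * and the block descriptors, and chain every block onto the
 * free list.
 */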
void
initlumpcache(u32int size, u32int nblocks)
{
	Lump *last, *b;
	int i;

	lumpcache.full.l = &lumpcache.lock;
	lumpcache.nblocks = nblocks;
	lumpcache.allowed = size;
	lumpcache.avail = size;
	lumpcache.heads = MKNZ(Lump*, HashSize);
	lumpcache.heap = MKNZ(Lump*, nblocks);
	lumpcache.blocks = MKNZ(Lump, nblocks);
	setstat(StatLcacheSize, lumpcache.nblocks);

	last = nil;
	for(i = 0; i < nblocks; i++){
		b = &lumpcache.blocks[i];
		b->type = TWID8;
		b->heap = TWID32;
		b->next = last;
		last = b;
	}
	lumpcache.free = last;
	lumpcache.nheap = 0;
}
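
/*
 * return the lump with the given score and type, locked and
 * with an extra reference; on a miss a free block is claimed
 * and returned with b->data == nil for the caller to fill.
 */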
Lump*
lookuplump(u8int *score, int type)
{
	uint ms;
	Lump *b;
	u32int h;

	ms = 0;
	trace(TraceLump, "lookuplump enter");

	h = hashbits(score, HashLog);

	/*
	 * look for the block in the cache
	 */
	qlock(&lumpcache.lock);
	CHECK(checklumpcache());
again:
	for(b = lumpcache.heads[h]; b != nil; b = b->next){
		if(scorecmp(score, b->score)==0 && type == b->type){
			addstat(StatLcacheHit, 1);
			trace(TraceLump, "lookuplump hit");
			goto found;
		}
	}

	trace(TraceLump, "lookuplump miss");

	/*
	 * missed: locate the block with the oldest second to last use.
	 * remove it from the heap, and fix up the heap.
	 */
	while(lumpcache.free == nil){
		trace(TraceLump, "lookuplump bump");
		CHECK(checklumpcache());
		if(bumplump() == nil){
			CHECK(checklumpcache());
			logerr(EAdmin, "all lump cache blocks in use");
			addstat(StatLcacheStall, 1);
			CHECK(checklumpcache());
			rsleep(&lumpcache.full);
			CHECK(checklumpcache());
			addstat(StatLcacheStall, -1);
			goto again;
		}
		CHECK(checklumpcache());
	}

	/* start timer on cache miss to avoid system call on cache hit */
	ms = msec();
	addstat(StatLcacheMiss, 1);
	b = lumpcache.free;
	lumpcache.free = b->next;

	/*
	 * the new block has no last use, so assume it happens sometime in the middle
	 * ZZZ this is not reasonable
	 */
	b->used = (b->used2 + lumpcache.now) / 2;

	/*
	 * rechain the block on the correct hash chain
	 */
	b->next = lumpcache.heads[h];
	lumpcache.heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	scorecp(b->score, score);
	b->type = type;
	b->size = 0;
	b->data = nil;
found:
	b->ref++;
	b->used2 = b->used;
	b->used = lumpcache.now++;
	if(b->heap != TWID32)
		fixheap(b->heap, b);
	CHECK(checklumpcache());
	qunlock(&lumpcache.lock);
	addstat(StatLumpStall, 1);
	qlock(&b->lock);
	addstat(StatLumpStall, -1);

	trace(TraceLump, "lookuplump exit");
	addstat2(StatLcacheRead, 1, StatLcacheReadTime, ms ? msec()-ms : 0);
	return b;
}
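
/*
 * store packet p as the data for lump b, evicting idle lumps
 * until the cache's space accounting has room for it.
 */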
void
insertlump(Lump *b, Packet *p)
{
	u32int size;

	trace(TraceLump, "insertlump enter");
	qlock(&lumpcache.lock);
	CHECK(checklumpcache());
again:

	addstat(StatLcacheWrite, 1);
	/*
	 * make room: evict the blocks with the oldest second to last use,
	 * removing each from the heap and fixing up the heap.
	 */
	size = packetasize(p);
	while(lumpcache.avail < size){
		trace(TraceLump, "insertlump bump");
		CHECK(checklumpcache());
		if(bumplump() == nil){
			logerr(EAdmin, "all lump cache blocks in use");
			addstat(StatLcacheStall, 1);
			CHECK(checklumpcache());
			rsleep(&lumpcache.full);
			CHECK(checklumpcache());
			addstat(StatLcacheStall, -1);
			goto again;
		}
		CHECK(checklumpcache());
	}
	b->data = p;
	b->size = size;
	lumpcache.avail -= size;
	CHECK(checklumpcache());
	qunlock(&lumpcache.lock);
	trace(TraceLump, "insertlump exit");
}
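
/*
 * release the caller's hold on b: drop its lock, and when the
 * last reference is gone, put it on the victim heap so it can
 * be evicted, waking anyone stalled waiting for a block.
 */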
void
putlump(Lump *b)
{
	if(b == nil)
		return;

	trace(TraceLump, "putlump");
	qunlock(&b->lock);
	qlock(&lumpcache.lock);
	CHECK(checklumpcache());
	if(--b->ref == 0){
		if(b->heap == TWID32)
			upheap(lumpcache.nheap++, b);
		trace(TraceLump, "putlump wakeup");
		rwakeupall(&lumpcache.full);
	}
	CHECK(checklumpcache());
	qunlock(&lumpcache.lock);
}
/*
 * remove some lump from use and update the free list and counters
 */
static Lump*
bumplump(void)
{
	Lump *b;
	u32int h;

	/*
	 * remove blocks until we find one that is unused.
	 * referenced blocks are left in the heap even though
	 * they can't be scavenged; this is simply a speed optimization
	 */
	CHECK(checklumpcache());
	for(;;){
		if(lumpcache.nheap == 0){
			trace(TraceLump, "bumplump emptyheap");
			return nil;
		}
		b = lumpcache.heap[0];
		delheap(b);
		if(!b->ref){
			trace(TraceLump, "bumplump wakeup");
			rwakeupall(&lumpcache.full);
			break;
		}
	}

	/*
	 * unchain the block
	 */
	trace(TraceLump, "bumplump unchain");
	if(b->prev == nil){
		h = hashbits(b->score, HashLog);
		if(lumpcache.heads[h] != b)
			sysfatal("bad hash chains in lump cache");
		lumpcache.heads[h] = b->next;
	}else
		b->prev->next = b->next;
	if(b->next != nil)
		b->next->prev = b->prev;

	if(b->data != nil){
		packetfree(b->data);
		b->data = nil;
		lumpcache.avail += b->size;
		b->size = 0;
	}
	b->type = TWID8;

	b->next = lumpcache.free;
	lumpcache.free = b;

	CHECK(checklumpcache());
	trace(TraceLump, "bumplump exit");
	return b;
}
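
/*
 * evict every block, freeing all cached packets
 */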
void
emptylumpcache(void)
{
	qlock(&lumpcache.lock);
	while(bumplump())
		;
	qunlock(&lumpcache.lock);
}
/*
 * delete an arbitrary block from the heap
 */
static void
delheap(Lump *db)
{
	fixheap(db->heap, lumpcache.heap[--lumpcache.nheap]);
	db->heap = TWID32;
}
/*
 * push an element up or down to its correct new location
 */
static void
fixheap(int i, Lump *b)
{
	if(upheap(i, b) == i)
		downheap(i, b);
}
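
/*
 * move b toward the root while it is older than its parent.
 * ages are compared relative to now with unsigned arithmetic,
 * so the ordering stays correct when the tick counter wraps.
 */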
static int
upheap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int p;

	now = lumpcache.now;
	for(; i != 0; i = p){
		p = (i - 1) >> 1;
		bb = lumpcache.heap[p];
		if(b->used2 - now >= bb->used2 - now)
			break;
		lumpcache.heap[i] = bb;
		bb->heap = i;
	}

	lumpcache.heap[i] = b;
	b->heap = i;
	return i;
}
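
/*
 * move b toward the leaves, swapping it with its older child,
 * until neither child is older than b.
 */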
static int
downheap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int k;

	now = lumpcache.now;
	for(; ; i = k){
		k = (i << 1) + 1;
		if(k >= lumpcache.nheap)
			break;
		if(k + 1 < lumpcache.nheap && lumpcache.heap[k]->used2 - now > lumpcache.heap[k + 1]->used2 - now)
			k++;
		bb = lumpcache.heap[k];
		if(b->used2 - now <= bb->used2 - now)
			break;
		lumpcache.heap[i] = bb;
		bb->heap = i;
	}

	lumpcache.heap[i] = b;
	b->heap = i;
	return i;
}
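
/*
 * debugging aid: die unless bb is properly linked into its
 * hash chain with consistent prev pointers.
 */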
static void
findblock(Lump *bb)
{
	Lump *b, *last;
	int h;

	last = nil;
	h = hashbits(bb->score, HashLog);
	for(b = lumpcache.heads[h]; b != nil; b = b->next){
		if(last != b->prev)
			sysfatal("bad prev link");
		if(b == bb)
			return;
		last = b;
	}
	sysfatal("block score=%V type=%#x missing from hash table", bb->score, bb->type);
}
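
/*
 * verify the cache invariants: heap ordering and back pointers,
 * hash chain membership, space accounting against allowed/avail,
 * and that every block is in the heap, the free list, or in use.
 */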
void
checklumpcache(void)
{
	Lump *b;
	u32int size, now, nfree;
	int i, k, refed;

	now = lumpcache.now;
	for(i = 0; i < lumpcache.nheap; i++){
		if(lumpcache.heap[i]->heap != i)
			sysfatal("lc: mis-heaped at %d: %d", i, lumpcache.heap[i]->heap);
		if(i > 0 && lumpcache.heap[(i - 1) >> 1]->used2 - now > lumpcache.heap[i]->used2 - now)
			sysfatal("lc: bad heap ordering");
		k = (i << 1) + 1;
		if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
			sysfatal("lc: bad heap ordering");
		k++;
		if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
			sysfatal("lc: bad heap ordering");
	}

	refed = 0;
	size = 0;
	for(i = 0; i < lumpcache.nblocks; i++){
		b = &lumpcache.blocks[i];
		if(b->data == nil && b->size != 0)
			sysfatal("bad size: %d data=%p", b->size, b->data);
		if(b->ref && b->heap == TWID32)
			refed++;
		if(b->type != TWID8){
			findblock(b);
			size += b->size;
		}
		if(b->heap != TWID32
		&& lumpcache.heap[b->heap] != b)
			sysfatal("lc: spurious heap value");
	}
	if(lumpcache.avail != lumpcache.allowed - size){
		fprint(2, "mismatched available=%d and allowed=%d - used=%d space", lumpcache.avail, lumpcache.allowed, size);
		*(volatile int*)0=0;	/* deliberate crash to dump state for debugging */
	}
	nfree = 0;
	for(b = lumpcache.free; b != nil; b = b->next){
		if(b->type != TWID8 || b->heap != TWID32)
			sysfatal("lc: bad free list");
		nfree++;
	}
	if(lumpcache.nheap + nfree + refed != lumpcache.nblocks)
		sysfatal("lc: missing blocks: %d %d %d %d", lumpcache.nheap, refed, nfree, lumpcache.nblocks);
}