varnish-cache/bin/varnishd/cache/cache_hash.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This is the central hash-table code; it relies on the chosen hash
 * implementation only for the actual hashing, all the housekeeping
 * happens here.
 *
 * We have two kinds of structures, objecthead and object.  An objecthead
 * corresponds to a given (Host:, URL) tuple, and the objects hung from
 * the objecthead may represent various variations (ie: Vary: header,
 * different TTL etc) of that web-entity.
 *
 * Each objecthead has a mutex which locks its own fields, the
 * list of objects and the fields in the objects.
 *
 * The hash implementation must supply a reference count facility on
 * the objecthead, and return with a reference held after a lookup.
 *
 * Lookups in the hash implementation return with a ref held and each
 * object hung from the objhead holds a ref as well.
 *
 * Objects have refcounts which are locked by the objecthead mutex.
 *
 * New objects are always marked busy, and they can go from busy to
 * not busy only once.
 */
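
/*
 * In rough outline, the lifecycle handled by this file is:
 *
 *	hsh_prealloc()       worker pre-creates an objhead and an objcore
 *	HSH_Lookup()         digest -> objhead -> objcore
 *	                     (miss, hit, hitpass, hitmiss, grace or busy)
 *	HSH_Unbusy()         a finished fetch becomes visible to lookups
 *	HSH_DerefObjCore()   drop a reference, possibly destroying the object
 *
 * Requests which find only busy objcores park on the objhead's
 * waitinglist and are restarted ("rushed") via hsh_rush1()/hsh_rush2()
 * when one of those objcores changes state.
 */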

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "cache/cache_objhead.h"
#include "cache/cache_transport.h"

#include "hash/hash_slinger.h"

#include "vsha256.h"

struct rush {
        unsigned                magic;
#define RUSH_MAGIC              0xa1af5f01
        VTAILQ_HEAD(,req)       reqs;
};

static const struct hash_slinger *hash;
static struct objhead *private_oh;

static void hsh_rush1(const struct worker *, struct objhead *,
    struct rush *, int);
static void hsh_rush2(struct worker *, struct rush *);
static int hsh_deref_objhead(struct worker *wrk, struct objhead **poh);
static int hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh,
    int);

/*---------------------------------------------------------------------*/

#define VCF_RETURN(x) const struct vcf_return VCF_##x[1] = { \
        { .name = #x, } \
};

VCF_RETURNS()
#undef VCF_RETURN

/*---------------------------------------------------------------------*/

static struct objhead *
hsh_newobjhead(void)
{
        struct objhead *oh;

        ALLOC_OBJ(oh, OBJHEAD_MAGIC);
        XXXAN(oh);
        oh->refcnt = 1;
        VTAILQ_INIT(&oh->objcs);
        VTAILQ_INIT(&oh->waitinglist);
        Lck_New(&oh->mtx, lck_objhdr);
        return (oh);
}

/*---------------------------------------------------------------------*/
/* Precreate an objhead and object for later use */
static void
hsh_prealloc(struct worker *wrk)
{

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        if (wrk->wpriv->nobjcore == NULL)
                wrk->wpriv->nobjcore = ObjNew(wrk);
        CHECK_OBJ_NOTNULL(wrk->wpriv->nobjcore, OBJCORE_MAGIC);

        if (wrk->wpriv->nobjhead == NULL) {
                wrk->wpriv->nobjhead = hsh_newobjhead();
                wrk->stats->n_objecthead++;
        }
        CHECK_OBJ_NOTNULL(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);

        if (hash->prep != NULL)
                hash->prep(wrk);
}

/*---------------------------------------------------------------------*/

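/* Create an objcore on the global private_oh.  Such objcores carry
 * OC_F_PRIVATE and are never reachable through HSH_Lookup(); they are
 * used where an object needs storage but must not be shared (typically
 * pass and other uncacheable transactions). */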
struct objcore *
HSH_Private(const struct worker *wrk)
{
        struct objcore *oc;

        CHECK_OBJ_NOTNULL(private_oh, OBJHEAD_MAGIC);

        oc = ObjNew(wrk);
        AN(oc);
        oc->refcnt = 1;
        oc->objhead = private_oh;
        oc->flags |= OC_F_PRIVATE;
        Lck_Lock(&private_oh->mtx);
        VTAILQ_INSERT_TAIL(&private_oh->objcs, oc, hsh_list);
        private_oh->refcnt++;
        Lck_Unlock(&private_oh->mtx);
        return (oc);
}

/*---------------------------------------------------------------------*/

void
HSH_Cleanup(const struct worker *wrk)
{

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        if (wrk->wpriv->nobjcore != NULL)
                ObjDestroy(wrk, &wrk->wpriv->nobjcore);

        if (wrk->wpriv->nobjhead != NULL) {
                CHECK_OBJ(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);
                Lck_Delete(&wrk->wpriv->nobjhead->mtx);
                FREE_OBJ(wrk->wpriv->nobjhead);
                wrk->stats->n_objecthead--;
        }
        if (wrk->wpriv->nhashpriv != NULL) {
                /* XXX: If needed, add slinger method for this */
                free(wrk->wpriv->nhashpriv);
                wrk->wpriv->nhashpriv = NULL;
        }
}

void
HSH_DeleteObjHead(const struct worker *wrk, struct objhead *oh)
{
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        AZ(oh->refcnt);
        assert(VTAILQ_EMPTY(&oh->objcs));
        assert(VTAILQ_EMPTY(&oh->waitinglist));
        Lck_Delete(&oh->mtx);
        wrk->stats->n_objecthead--;
        FREE_OBJ(oh);
}

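/* Feed one component of the lookup key into the SHA256 context in ctx
 * and log it as SLT_Hash.  A NULL str hashes a single zero byte instead,
 * separating the concatenated components. */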
void
HSH_AddString(struct req *req, void *ctx, const char *str)
{

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        AN(ctx);
        if (str != NULL) {
                VSHA256_Update(ctx, str, strlen(str));
                VSLbs(req->vsl, SLT_Hash, TOSTRAND(str));
        } else
                VSHA256_Update(ctx, &str, 1);
}

/*---------------------------------------------------------------------
 * This is a debugging hack to enable testing of boundary conditions
 * in the hash algorithm.
 * We trap the first 9 different digests and translate them to different
 * digests with edge bit conditions
 */

static struct hsh_magiclist {
        unsigned char was[VSHA256_LEN];
        unsigned char now[VSHA256_LEN];
} hsh_magiclist[] = {
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 } },
        { .now = {      0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
};

#define HSH_NMAGIC (sizeof hsh_magiclist / sizeof hsh_magiclist[0])

static void
hsh_testmagic(void *result)
{
        size_t i, j;
        static size_t nused = 0;

        for (i = 0; i < nused; i++)
                if (!memcmp(hsh_magiclist[i].was, result, VSHA256_LEN))
                        break;
        if (i == nused && i < HSH_NMAGIC)
                memcpy(hsh_magiclist[nused++].was, result, VSHA256_LEN);
        if (i == nused)
                return;
        assert(i < HSH_NMAGIC);
        fprintf(stderr, "HASHMAGIC: <");
        for (j = 0; j < VSHA256_LEN; j++)
                fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
        fprintf(stderr, "> -> <");
        memcpy(result, hsh_magiclist[i].now, VSHA256_LEN);
        for (j = 0; j < VSHA256_LEN; j++)
                fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
        fprintf(stderr, ">\n");
}

/*---------------------------------------------------------------------
 * Insert an object which magically appears out of nowhere or, more likely,
 * comes off some persistent storage device.
 * Insert it with a reference held.
 */

void
HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc,
    struct ban *ban)
{
        struct objhead *oh;
        struct rush rush;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        AN(digest);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        AN(ban);
        AN(oc->flags & OC_F_BUSY);
        AZ(oc->flags & OC_F_PRIVATE);
        assert(oc->refcnt == 1);
        INIT_OBJ(&rush, RUSH_MAGIC);

        hsh_prealloc(wrk);

        AN(wrk->wpriv->nobjhead);
        oh = hash->lookup(wrk, digest, &wrk->wpriv->nobjhead);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);
        assert(oh->refcnt > 0);

        /* Mark object busy and insert (precreated) objcore in
           objecthead. The new object inherits our objhead reference. */
        oc->objhead = oh;
        VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
        EXP_RefNewObjcore(oc);
        Lck_Unlock(&oh->mtx);

        BAN_RefBan(oc, ban);
        AN(oc->ban);

        /* Move the object first in the oh list, unbusy it and run the
           waitinglist if necessary */
        Lck_Lock(&oh->mtx);
        VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
        oc->flags &= ~OC_F_BUSY;
        if (!VTAILQ_EMPTY(&oh->waitinglist))
                hsh_rush1(wrk, oh, &rush, HSH_RUSH_POLICY);
        Lck_Unlock(&oh->mtx);
        hsh_rush2(wrk, &rush);

        EXP_Insert(wrk, oc);
}

/*---------------------------------------------------------------------
 */

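/* Consume the worker's preallocated objcore (wpriv->nobjcore) and hook
 * it onto oh as a busy placeholder; its single reference is owned by the
 * coming busyobj.  The caller must hold oh->mtx. */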
static struct objcore *
hsh_insert_busyobj(const struct worker *wrk, struct objhead *oh)
{
        struct objcore *oc;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);

        oc = wrk->wpriv->nobjcore;
        wrk->wpriv->nobjcore = NULL;
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        AN(oc->flags & OC_F_BUSY);
        oc->refcnt = 1;         /* Owned by busyobj */
        oc->objhead = oh;
        VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
        return (oc);
}

/*---------------------------------------------------------------------
 */

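/* Resolve req->digest to an objhead and scan its objcores for a usable
 * variant.  Roughly:
 *
 *	HSH_HIT      *ocp is a fresh matching object, reference held
 *	HSH_GRACE    *ocp is an expired object still within grace
 *	HSH_MISS     *bocp is a new busy objcore to fetch into
 *	HSH_HITMISS  as HSH_MISS, caused by a hit-for-miss object
 *	HSH_HITPASS  a hit-for-pass object was found
 *	HSH_BUSY     only busy objcores matched; the req has been parked
 *	             on the objhead's waitinglist
 *
 * See the code below for the exact handling of *ocp and *bocp per case.
 */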
enum lookup_e
HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
{
        struct worker *wrk;
        struct objhead *oh;
        struct objcore *oc;
        struct objcore *exp_oc;
        const struct vcf_return *vr;
        vtim_real exp_t_origin;
        int busy_found;
        const uint8_t *vary;
        intmax_t boc_progress;
        unsigned xid = 0;
        unsigned ban_checks;
        unsigned ban_any_variant;
        float dttl = 0.0;

        AN(ocp);
        *ocp = NULL;
        AN(bocp);
        *bocp = NULL;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        wrk = req->wrk;
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        CHECK_OBJ_NOTNULL(req->http, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(req->vcf, VCF_MAGIC);
        AN(hash);

        hsh_prealloc(wrk);
        if (DO_DEBUG(DBG_HASHEDGE))
                hsh_testmagic(req->digest);

        if (req->hash_objhead != NULL) {
                /*
                 * This req came off the waiting list, and brings an
                 * oh refcnt with it.
                 */
                CHECK_OBJ_NOTNULL(req->hash_objhead, OBJHEAD_MAGIC);
                oh = req->hash_objhead;
                Lck_Lock(&oh->mtx);
                req->hash_objhead = NULL;
        } else {
                AN(wrk->wpriv->nobjhead);
                oh = hash->lookup(wrk, req->digest, &wrk->wpriv->nobjhead);
        }

        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);

        if (req->hash_always_miss) {
                /* XXX: should we do predictive Vary in this case ? */
                /* Insert new objcore in objecthead and release mutex */
                *bocp = hsh_insert_busyobj(wrk, oh);
                /* NB: no deref of objhead, new object inherits reference */
                Lck_Unlock(&oh->mtx);
                return (HSH_MISS);
        }

        assert(oh->refcnt > 0);
        busy_found = 0;
        exp_oc = NULL;
        exp_t_origin = 0.0;
        ban_checks = 0;
        ban_any_variant = cache_param->ban_any_variant;
        VTAILQ_FOREACH(oc, &oh->objcs, hsh_list) {
                /* Must be at least our own ref + the objcore we examine */
                assert(oh->refcnt > 1);
                CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                assert(oc->objhead == oh);
                assert(oc->refcnt > 0);

                if (oc->flags & OC_F_DYING)
                        continue;
                if (oc->flags & OC_F_FAILED)
                        continue;

                CHECK_OBJ_ORNULL(oc->boc, BOC_MAGIC);
                if (oc->flags & OC_F_BUSY) {
                        if (req->hash_ignore_busy)
                                continue;

                        if (oc->boc && oc->boc->vary != NULL &&
                            !req->hash_ignore_vary &&
                            !VRY_Match(req, oc->boc->vary)) {
                                wrk->strangelove++;
                                continue;
                        }

                        busy_found = 1;
                        continue;
                }

                if (oc->ttl <= 0.)
                        continue;

                if (ban_checks++ < ban_any_variant
                    && BAN_CheckObject(wrk, oc, req)) {
                        oc->flags |= OC_F_DYING;
                        EXP_Remove(oc, NULL);
                        continue;
                }

                if (!req->hash_ignore_vary && ObjHasAttr(wrk, oc, OA_VARY)) {
                        vary = ObjGetAttr(wrk, oc, OA_VARY, NULL);
                        AN(vary);
                        if (!VRY_Match(req, vary)) {
                                wrk->strangelove++;
                                continue;
                        }
                }

                if (ban_checks >= ban_any_variant
                    && BAN_CheckObject(wrk, oc, req)) {
                        oc->flags |= OC_F_DYING;
                        EXP_Remove(oc, NULL);
                        continue;
                }

                if (req->vcf != NULL) {
                        vr = req->vcf->func(req, &oc, &exp_oc, 0);
                        if (vr == VCF_CONTINUE)
                                continue;
                        if (vr == VCF_MISS) {
                                oc = NULL;
                                break;
                        }
                        if (vr == VCF_HIT)
                                break;
                        assert(vr == VCF_DEFAULT);
                }

                if (EXP_Ttl(req, oc) > req->t_req) {
                        assert(oh->refcnt > 1);
                        assert(oc->objhead == oh);
                        break;
                }

                if (EXP_Ttl(NULL, oc) <= req->t_req && /* ignore req.ttl */
                    oc->t_origin > exp_t_origin) {
                        /* record the newest object */
                        exp_oc = oc;
                        exp_t_origin = oc->t_origin;
                        assert(oh->refcnt > 1);
                        assert(exp_oc->objhead == oh);
                }
        }

        if (req->vcf != NULL)
                (void)req->vcf->func(req, &oc, &exp_oc, 1);

        if (oc != NULL && oc->flags & OC_F_HFP) {
                xid = VXID(ObjGetXID(wrk, oc));
                dttl = EXP_Dttl(req, oc);
                AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
                wrk->stats->cache_hitpass++;
                VSLb(req->vsl, SLT_HitPass, "%u %.6f", xid, dttl);
                return (HSH_HITPASS);
        }

        if (oc != NULL) {
                *ocp = oc;
                oc->refcnt++;
                if (oc->flags & OC_F_HFM) {
                        xid = VXID(ObjGetXID(wrk, oc));
                        dttl = EXP_Dttl(req, oc);
                        *bocp = hsh_insert_busyobj(wrk, oh);
                        Lck_Unlock(&oh->mtx);
                        wrk->stats->cache_hitmiss++;
                        VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
                        return (HSH_HITMISS);
                }
                oc->hits++;
                boc_progress = oc->boc == NULL ? -1 : oc->boc->fetched_so_far;
                AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
                Req_LogHit(wrk, req, oc, boc_progress);
                return (HSH_HIT);
        }

        if (exp_oc != NULL && exp_oc->flags & OC_F_HFM) {
                /*
                 * expired HFM ("grace/keep HFM")
                 *
                 * XXX should HFM objects actually have grace/keep ?
                 * XXX also: why isn't *ocp = exp_oc ?
                 */
                xid = VXID(ObjGetXID(wrk, exp_oc));
                dttl = EXP_Dttl(req, exp_oc);
                *bocp = hsh_insert_busyobj(wrk, oh);
                Lck_Unlock(&oh->mtx);
                wrk->stats->cache_hitmiss++;
                VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
                return (HSH_HITMISS);
        }

        if (exp_oc != NULL && exp_oc->boc != NULL)
                boc_progress = exp_oc->boc->fetched_so_far;
        else
                boc_progress = -1;

        if (!busy_found) {
                *bocp = hsh_insert_busyobj(wrk, oh);

                if (exp_oc != NULL) {
                        exp_oc->refcnt++;
                        *ocp = exp_oc;
                        if (EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
                                exp_oc->hits++;
                                Lck_Unlock(&oh->mtx);
                                Req_LogHit(wrk, req, exp_oc, boc_progress);
                                return (HSH_GRACE);
                        }
                }
                Lck_Unlock(&oh->mtx);
                return (HSH_MISS);
        }

        AN(busy_found);
        if (exp_oc != NULL && EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
                /* we do not wait on the busy object if in grace */
                exp_oc->refcnt++;
                *ocp = exp_oc;
                exp_oc->hits++;
                AN(hsh_deref_objhead_unlock(wrk, &oh, 0));
                Req_LogHit(wrk, req, exp_oc, boc_progress);
                return (HSH_GRACE);
        }

        /* There are one or more busy objects, wait for them */
        VTAILQ_INSERT_TAIL(&oh->waitinglist, req, w_list);

        AZ(req->hash_ignore_busy);

        /*
         * The objhead reference transfers to the sess; we get it
         * back when the sess comes off the waiting list and
         * calls us again.
         */
        req->hash_objhead = oh;
        req->wrk = NULL;
        req->waitinglist = 1;

        if (DO_DEBUG(DBG_WAITINGLIST))
                VSLb(req->vsl, SLT_Debug, "on waiting list <%p>", oh);

        Lck_Unlock(&oh->mtx);

        wrk->stats->busy_sleep++;
        return (HSH_BUSY);
}

/*---------------------------------------------------------------------
 * Pick the req's we are going to rush from the waiting list
 */

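/* Collect up to 'max' parked requests off oh->waitinglist into r->reqs;
 * the caller must hold oh->mtx.  The collected requests are rescheduled
 * by hsh_rush2() once the mutex has been dropped. */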
static void
hsh_rush1(const struct worker *wrk, struct objhead *oh, struct rush *r, int max)
{
        int i;
        struct req *req;

        if (max == 0)
                return;
        if (max == HSH_RUSH_POLICY)
                max = cache_param->rush_exponent;
        assert(max > 0);

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);
        VTAILQ_INIT(&r->reqs);
        Lck_AssertHeld(&oh->mtx);
        for (i = 0; i < max; i++) {
                req = VTAILQ_FIRST(&oh->waitinglist);
                if (req == NULL)
                        break;
                CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
                wrk->stats->busy_wakeup++;
                AZ(req->wrk);
                VTAILQ_REMOVE(&oh->waitinglist, req, w_list);
                VTAILQ_INSERT_TAIL(&r->reqs, req, w_list);
                req->waitinglist = 0;
        }
}

/*---------------------------------------------------------------------
 * Rush req's that came from waiting list.
 */

static void
hsh_rush2(struct worker *wrk, struct rush *r)
{
        struct req *req;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);

        while (!VTAILQ_EMPTY(&r->reqs)) {
                req = VTAILQ_FIRST(&r->reqs);
                CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
                VTAILQ_REMOVE(&r->reqs, req, w_list);
                DSL(DBG_WAITINGLIST, req->vsl->wid, "off waiting list");
                if (req->transport->reembark != NULL) {
                        // For ESI includes
                        req->transport->reembark(wrk, req);
                } else {
                        /*
                         * We ignore the queue limits which apply to new
                         * requests because if we fail to reschedule there
                         * may be vmod_privs to cleanup and we need a proper
                         * workerthread for that.
                         */
                        AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
                }
        }
}

/*---------------------------------------------------------------------
 * Purge an entire objhead
 */

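/* With ttl == grace == keep == 0 this is a hard purge: every non-busy
 * objcore on oh is marked OC_F_DYING and removed from the expiry.
 * Otherwise the remaining lifetime of each objcore is merely reduced
 * (EXP_Reduce), a "soft" purge.  Returns the number of objcores touched. */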
unsigned
HSH_Purge(struct worker *wrk, struct objhead *oh, vtim_real ttl_now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep)
{
        struct objcore *oc, *oc_nows[2], **ocp;
        unsigned i, j, n, n_max, total = 0;
        int is_purge;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        is_purge = (ttl == 0 && grace == 0 && keep == 0);
        n_max = WS_ReserveLumps(wrk->aws, sizeof *ocp);
        if (n_max < 2) {
                /* No space on the workspace. Give it a stack buffer of 2
                 * elements, which is the minimum for the algorithm
                 * below. */
                ocp = oc_nows;
                n_max = 2;
        } else
                ocp = WS_Reservation(wrk->aws);
        AN(ocp);

        /* Note: This algorithm uses OC references in the list as
         * bookmarks, in order to know how far into the list we were when
         * releasing the mutex partway through and want to resume
         * again. This relies on the list not being reordered while we are
         * not holding the mutex. The only place where that happens is in
         * HSH_Unbusy(), where an OC_F_BUSY OC is moved first in the
         * list. This does not cause problems because we skip OC_F_BUSY
         * OCs. */

        Lck_Lock(&oh->mtx);
        oc = VTAILQ_FIRST(&oh->objcs);
        n = 0;
        while (1) {
                for (; n < n_max && oc != NULL; oc = VTAILQ_NEXT(oc, hsh_list))
                {
                        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                        assert(oc->objhead == oh);
                        if (oc->flags & OC_F_BUSY) {
                                /* We cannot purge busy objects here, because
                                 * their owners have special rights to them,
                                 * and may nuke them without concern for the
                                 * refcount, which by definition always must
                                 * be one, so they don't check. */
                                continue;
                        }
                        if (oc->flags & OC_F_DYING)
                                continue;
                        if (is_purge)
                                oc->flags |= OC_F_DYING;
                        oc->refcnt++;
                        ocp[n++] = oc;
                }

                Lck_Unlock(&oh->mtx);

                if (n == 0) {
                        /* No eligible objcores found. We are finished. */
                        break;
                }

                j = n;
                if (oc != NULL) {
                        /* There are more objects on the objhead that we
                         * have not yet looked at, but no more space on
                         * the objcore reference list. Do not process the
                         * last one, it will be used as the bookmark into
                         * the objcore list for the next iteration of the
                         * outer loop. */
                        j--;
                        assert(j >= 1); /* True because n_max >= 2 */
                }
                for (i = 0; i < j; i++) {
                        CHECK_OBJ_NOTNULL(ocp[i], OBJCORE_MAGIC);
                        if (is_purge)
                                EXP_Remove(ocp[i], NULL);
                        else
                                EXP_Reduce(ocp[i], ttl_now, ttl, grace, keep);
                        (void)HSH_DerefObjCore(wrk, &ocp[i], 0);
                        AZ(ocp[i]);
                        total++;
                }

                if (j == n) {
                        /* No bookmark set, that means we got to the end
                         * of the objcore list in the previous run and are
                         * finished. */
                        break;
                }

                Lck_Lock(&oh->mtx);

                /* Move the bookmark first and continue scanning the
                 * objcores */
                CHECK_OBJ_NOTNULL(ocp[j], OBJCORE_MAGIC);
                ocp[0] = ocp[j];
                n = 1;
                oc = VTAILQ_NEXT(ocp[0], hsh_list);
                CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        }

        WS_Release(wrk->aws, 0);
        if (is_purge)
                Pool_PurgeStat(total);
        return (total);
}

/*---------------------------------------------------------------------
 * Fail an objcore
 */

void
HSH_Fail(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);

        /*
         * We have to have either a busy bit, so that HSH_Lookup
         * will not consider this oc, or an object hung off the oc
         * so that it can consider it.
         */
        assert((oc->flags & OC_F_BUSY) || (oc->stobj->stevedore != NULL));

        Lck_Lock(&oh->mtx);
        oc->flags |= OC_F_FAILED;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Mark a fetch we will not need as cancelled
 */

static void
hsh_cancel(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        oc->flags |= OC_F_CANCEL;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Cancel a fetch when the client does not need it any more
 */

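/* Only transient (OC_F_TRANSIENT) objects are affected; for those the
 * ongoing fetch, if any, is flagged OC_F_CANCEL and we wait for it to
 * reach BOS_FINISHED before the object is slimmed down with ObjSlim(). */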
void
HSH_Cancel(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
        struct boc *bocref = NULL;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if ((oc->flags & OC_F_TRANSIENT) == 0)
                return;

        /*
         * NB: we use two distinct variables to only release the reference if
         * we had to acquire one. The caller-provided boc is optional.
         */
        if (boc == NULL)
                bocref = boc = HSH_RefBoc(oc);

        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

        if (oc->flags & OC_F_HFP)
                AN(oc->flags & OC_F_HFM);

        if (boc != NULL) {
                hsh_cancel(oc);
                ObjWaitState(oc, BOS_FINISHED);
        }

        if (bocref != NULL)
                HSH_DerefBoc(wrk, oc);

        ObjSlim(wrk, oc);
}

/*---------------------------------------------------------------------
 * Unbusy an objcore when the object is completely fetched.
 */

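/* Clearing OC_F_BUSY is what makes the new object eligible as a lookup
 * hit.  The objcore is also moved to the head of oh->objcs, handed to the
 * ban and expiry machinery (unless private), and any requests parked on
 * the waitinglist are rushed. */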
void
HSH_Unbusy(struct worker *wrk, struct objcore *oc)
{
        struct objhead *oh;
        struct rush rush;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);
        INIT_OBJ(&rush, RUSH_MAGIC);

        AN(oc->stobj->stevedore);
        AN(oc->flags & OC_F_BUSY);
        assert(oh->refcnt > 0);
        assert(oc->refcnt > 0);

        if (!(oc->flags & OC_F_PRIVATE)) {
                BAN_NewObjCore(oc);
                AN(oc->ban);
        }

        /* XXX: pretouch neighbors on oh->objcs to prevent page-on under mtx */
        Lck_Lock(&oh->mtx);
        assert(oh->refcnt > 0);
        assert(oc->refcnt > 0);
        if (!(oc->flags & OC_F_PRIVATE))
                EXP_RefNewObjcore(oc); /* Takes a ref for expiry */
        /* XXX: strictly speaking, we should sort in Date: order. */
        VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
        oc->flags &= ~OC_F_BUSY;
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, HSH_RUSH_POLICY);
        }
        Lck_Unlock(&oh->mtx);
        EXP_Insert(wrk, oc); /* Does nothing unless EXP_RefNewObjcore was
                              * called */
        hsh_rush2(wrk, &rush);
}

/*====================================================================
 * HSH_Kill()
 *
 * It's dead Jim, kick it...
 */

void
HSH_Kill(struct objcore *oc)
{

        HSH_Replace(oc, NULL);
}

void
HSH_Replace(struct objcore *oc, const struct objcore *new_oc)
{

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
        if (new_oc != NULL) {
                CHECK_OBJ(new_oc, OBJCORE_MAGIC);
                assert(oc->objhead == new_oc->objhead);
        }

        Lck_Lock(&oc->objhead->mtx);
        oc->flags |= OC_F_DYING;
        Lck_Unlock(&oc->objhead->mtx);
        EXP_Remove(oc, new_oc);
}

/*====================================================================
 * HSH_Snipe()
 *
 * If objcore is idle, gain a ref and mark it dead.
 */

int
HSH_Snipe(const struct worker *wrk, struct objcore *oc)
{
        int retval = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);

        if (oc->refcnt == 1 && !Lck_Trylock(&oc->objhead->mtx)) {
                if (oc->refcnt == 1 && !(oc->flags & OC_F_DYING)) {
                        oc->flags |= OC_F_DYING;
                        oc->refcnt++;
                        retval = 1;
                }
                Lck_Unlock(&oc->objhead->mtx);
        }
        if (retval)
                EXP_Remove(oc, NULL);
        return (retval);
}


/*---------------------------------------------------------------------
 * Gain a reference on an objcore
 */

void
HSH_Ref(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_Lock(&oh->mtx);
        assert(oc->refcnt > 0);
        oc->refcnt++;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Gain a reference on the busyobj, if the objcore has one
 */

struct boc *
HSH_RefBoc(const struct objcore *oc)
{
        struct objhead *oh;
        struct boc *boc;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        if (oc->boc == NULL)
                return (NULL);
        Lck_Lock(&oh->mtx);
        assert(oc->refcnt > 0);
        boc = oc->boc;
        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
        if (boc != NULL) {
                assert(boc->refcount > 0);
                if (boc->state < BOS_FINISHED)
                        boc->refcount++;
                else
                        boc = NULL;
        }
        Lck_Unlock(&oh->mtx);
        return (boc);
}

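/* Release a reference on oc's busy-object state; the thread that drops
 * the last reference detaches oc->boc and finishes it with ObjBocDone(). */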
void
HSH_DerefBoc(struct worker *wrk, struct objcore *oc)
{
        struct boc *boc;
        unsigned r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        boc = oc->boc;
        CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
        Lck_Lock(&oc->objhead->mtx);
        assert(oc->refcnt > 0);
        assert(boc->refcount > 0);
        r = --boc->refcount;
        if (r == 0)
                oc->boc = NULL;
        Lck_Unlock(&oc->objhead->mtx);
        if (r == 0)
                ObjBocDone(wrk, oc, &boc);
}

/*--------------------------------------------------------------------
 * Dereference objcore
 *
 * Returns zero if target was destroyed.
 */

int
HSH_DerefObjCore(struct worker *wrk, struct objcore **ocp, int rushmax)
{
        struct objcore *oc;
        struct objhead *oh;
        struct rush rush;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oc, ocp, OBJCORE_MAGIC);
        assert(oc->refcnt > 0);
        INIT_OBJ(&rush, RUSH_MAGIC);

        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        assert(oh->refcnt > 0);
        r = --oc->refcnt;
        if (!r)
                VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, rushmax);
        }
        Lck_Unlock(&oh->mtx);
        hsh_rush2(wrk, &rush);
        if (r != 0)
                return (r);

        AZ(oc->exp_flags);

        BAN_DestroyObj(oc);
        AZ(oc->ban);

        if (oc->stobj->stevedore != NULL)
                ObjFreeObj(wrk, oc);
        ObjDestroy(wrk, &oc);

        /* Drop our ref on the objhead */
        assert(oh->refcnt > 0);
        (void)hsh_deref_objhead(wrk, &oh);
        return (0);
}

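/* Drop one reference on an objhead.  The caller must hold oh->mtx; the
 * lock is released either here (for the global private_oh) or by the
 * hash slinger's deref method.  Waiting requests, if any, are rushed
 * with 'max' as the rush budget. */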
static int
hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh, int max)
{
        struct objhead *oh;
        struct rush rush;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

        Lck_AssertHeld(&oh->mtx);

        if (oh == private_oh) {
                assert(VTAILQ_EMPTY(&oh->waitinglist));
                assert(oh->refcnt > 1);
                oh->refcnt--;
                Lck_Unlock(&oh->mtx);
                return (1);
        }

        INIT_OBJ(&rush, RUSH_MAGIC);
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, max);
        }

        if (oh->refcnt == 1)
                assert(VTAILQ_EMPTY(&oh->waitinglist));

        assert(oh->refcnt > 0);
        r = hash->deref(wrk, oh); /* Unlocks oh->mtx */
        hsh_rush2(wrk, &rush);
        return (r);
}

static int
hsh_deref_objhead(struct worker *wrk, struct objhead **poh)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        return (hsh_deref_objhead_unlock(wrk, &oh, 0));
}

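/* Install the chosen hash slinger and create the global private_oh.
 * Must run before any of the lookup/insert functions above are used. */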
void
HSH_Init(const struct hash_slinger *slinger)
{

        assert(DIGEST_LEN == VSHA256_LEN);      /* avoid #include pollution */
        hash = slinger;
        if (hash->start != NULL)
                hash->start();
        private_oh = hsh_newobjhead();
        private_oh->refcnt = 1;
}