varnish-cache/bin/varnishd/cache/cache_hash.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This is the central hash-table code.  It relies on the chosen hash
 * implementation only for the actual hashing; all the housekeeping
 * happens here.
 *
 * We have two kinds of structures, objecthead and object.  An objecthead
 * corresponds to a given (Host:, URL) tuple, and the objects hung from
 * the objecthead represent variants (ie: different Vary: headers,
 * different TTLs etc) of that web-entity.
 *
 * Each objecthead has a mutex which protects its own fields, the
 * list of objects and the fields in the objects.
 *
 * The hash implementation must supply a reference count facility on
 * the objecthead, and return with a reference held after a lookup.
 *
 * Lookups in the hash implementation return with a ref held and each
 * object hung from the objhead holds a ref as well.
 *
 * Objects have refcounts which are protected by the objecthead mutex.
 *
 * New objects are always marked busy, and they can go from busy to
 * not busy only once.
 */
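
/*
 * Rough picture of the structures described above:
 *
 *    objhead (one per hash key: refcnt, mtx, objcs list, waitinglist)
 *       +- objcore (variant #1: refcnt, flags, ttl, optional boc)
 *       +- objcore (variant #2: ...)
 *       +- objcore (OC_F_BUSY: fetch in progress; lookups may wait on it)
 */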

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "cache/cache_objhead.h"
#include "cache/cache_transport.h"

#include "hash/hash_slinger.h"

#include "vsha256.h"
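
/*
 * A rush collects reqs picked off an objhead's waiting list while the
 * objhead mutex is held (hsh_rush1), so that they can be rescheduled
 * once the mutex has been dropped (hsh_rush2).
 */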
struct rush {
        unsigned                magic;
#define RUSH_MAGIC              0xa1af5f01
        VTAILQ_HEAD(,req)       reqs;
};

static const struct hash_slinger *hash;
static struct objhead *private_oh;

static void hsh_rush1(const struct worker *, struct objhead *,
    struct rush *, int);
static void hsh_rush2(struct worker *, struct rush *);
static int hsh_deref_objhead(struct worker *wrk, struct objhead **poh);
static int hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh,
    int);

/*---------------------------------------------------------------------*/

#define VCF_RETURN(x) const struct vcf_return VCF_##x[1] = { \
        { .name = #x, } \
};

VCF_RETURNS()
#undef VCF_RETURN
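
/*
 * The expansion above gives each vcf return value used by HSH_Lookup()
 * below (VCF_MISS, VCF_HIT, VCF_CONTINUE, VCF_DEFAULT) a named singleton
 * that a req->vcf->func() callback can hand back; VCF_RETURNS() itself is
 * assumed to be supplied by a shared header.
 */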

/*---------------------------------------------------------------------*/

static struct objhead *
hsh_newobjhead(void)
{
        struct objhead *oh;

        ALLOC_OBJ(oh, OBJHEAD_MAGIC);
        XXXAN(oh);
        oh->refcnt = 1;
        VTAILQ_INIT(&oh->objcs);
        VTAILQ_INIT(&oh->waitinglist);
        Lck_New(&oh->mtx, lck_objhdr);
        return (oh);
}

/*---------------------------------------------------------------------*/
/* Precreate an objhead and object for later use */
static void
hsh_prealloc(struct worker *wrk)
{

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        if (wrk->wpriv->nobjcore == NULL)
                wrk->wpriv->nobjcore = ObjNew(wrk);
        CHECK_OBJ_NOTNULL(wrk->wpriv->nobjcore, OBJCORE_MAGIC);

        if (wrk->wpriv->nobjhead == NULL) {
                wrk->wpriv->nobjhead = hsh_newobjhead();
                wrk->stats->n_objecthead++;
        }
        CHECK_OBJ_NOTNULL(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);

        if (hash->prep != NULL)
                hash->prep(wrk);
}

/*---------------------------------------------------------------------*/
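
/* Create a private (OC_F_PRIVATE) objcore on the shared private_oh
 * objhead; private objects are never reachable through a hash lookup. */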
struct objcore *
HSH_Private(const struct worker *wrk)
{
        struct objcore *oc;

        CHECK_OBJ_NOTNULL(private_oh, OBJHEAD_MAGIC);

        oc = ObjNew(wrk);
        AN(oc);
        oc->refcnt = 1;
        oc->objhead = private_oh;
        oc->flags |= OC_F_PRIVATE;
        Lck_Lock(&private_oh->mtx);
        VTAILQ_INSERT_TAIL(&private_oh->objcs, oc, hsh_list);
        private_oh->refcnt++;
        Lck_Unlock(&private_oh->mtx);
        return (oc);
}

/*---------------------------------------------------------------------*/
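
/* Release whatever the worker still has preallocated: the spare objcore,
 * the spare objhead and any slinger private data. */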
void
HSH_Cleanup(const struct worker *wrk)
{

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        if (wrk->wpriv->nobjcore != NULL)
                ObjDestroy(wrk, &wrk->wpriv->nobjcore);

        if (wrk->wpriv->nobjhead != NULL) {
                CHECK_OBJ(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);
                Lck_Delete(&wrk->wpriv->nobjhead->mtx);
                FREE_OBJ(wrk->wpriv->nobjhead);
                wrk->stats->n_objecthead--;
        }
        if (wrk->wpriv->nhashpriv != NULL) {
                /* XXX: If needed, add slinger method for this */
                free(wrk->wpriv->nhashpriv);
                wrk->wpriv->nhashpriv = NULL;
        }
}

void
HSH_DeleteObjHead(const struct worker *wrk, struct objhead *oh)
{
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        AZ(oh->refcnt);
        assert(VTAILQ_EMPTY(&oh->objcs));
        assert(VTAILQ_EMPTY(&oh->waitinglist));
        Lck_Delete(&oh->mtx);
        wrk->stats->n_objecthead--;
        FREE_OBJ(oh);
}
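
/* Feed a string into the SHA256 context that becomes req->digest; real
 * strings are also logged as SLT_Hash records, while a NULL pointer is
 * mixed in as a one-byte separator. */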
void
HSH_AddString(struct req *req, void *ctx, const char *str)
{

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        AN(ctx);
        if (str != NULL) {
                VSHA256_Update(ctx, str, strlen(str));
                VSLbs(req->vsl, SLT_Hash, TOSTRAND(str));
        } else
                VSHA256_Update(ctx, &str, 1);
}

/*---------------------------------------------------------------------
 * This is a debugging hack to enable testing of boundary conditions
 * in the hash algorithm.
 * We trap the first 9 different digests and translate them to different
 * digests with edge bit conditions.
 */

static struct hsh_magiclist {
        unsigned char was[VSHA256_LEN];
        unsigned char now[VSHA256_LEN];
} hsh_magiclist[] = {
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40 } },
        { .now = {      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 } },
        { .now = {      0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
        { .now = {      0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
};

#define HSH_NMAGIC (sizeof hsh_magiclist / sizeof hsh_magiclist[0])

static void
hsh_testmagic(void *result)
{
        size_t i, j;
        static size_t nused = 0;

        for (i = 0; i < nused; i++)
                if (!memcmp(hsh_magiclist[i].was, result, VSHA256_LEN))
                        break;
        if (i == nused && i < HSH_NMAGIC)
                memcpy(hsh_magiclist[nused++].was, result, VSHA256_LEN);
        if (i == nused)
                return;
        assert(i < HSH_NMAGIC);
        fprintf(stderr, "HASHMAGIC: <");
        for (j = 0; j < VSHA256_LEN; j++)
                fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
        fprintf(stderr, "> -> <");
        memcpy(result, hsh_magiclist[i].now, VSHA256_LEN);
        for (j = 0; j < VSHA256_LEN; j++)
                fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
        fprintf(stderr, ">\n");
}

/*---------------------------------------------------------------------
 * Insert an object which magically appears out of nowhere or, more likely,
 * comes off some persistent storage device.
 * Insert it with a reference held.
 */

void
HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc,
    struct ban *ban)
{
        struct objhead *oh;
        struct rush rush;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        AN(digest);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        AN(ban);
        AN(oc->flags & OC_F_BUSY);
        AZ(oc->flags & OC_F_PRIVATE);
        assert(oc->refcnt == 1);
        INIT_OBJ(&rush, RUSH_MAGIC);

        hsh_prealloc(wrk);

        AN(wrk->wpriv->nobjhead);
        oh = hash->lookup(wrk, digest, &wrk->wpriv->nobjhead);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);
        assert(oh->refcnt > 0);

        /* Mark object busy and insert (precreated) objcore in
           objecthead. The new object inherits our objhead reference. */
        oc->objhead = oh;
        VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
        EXP_RefNewObjcore(oc);
        Lck_Unlock(&oh->mtx);

        BAN_RefBan(oc, ban);
        AN(oc->ban);

        /* Move the object first in the oh list, unbusy it and run the
           waitinglist if necessary */
        Lck_Lock(&oh->mtx);
        VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
        oc->flags &= ~OC_F_BUSY;
        if (!VTAILQ_EMPTY(&oh->waitinglist))
                hsh_rush1(wrk, oh, &rush, HSH_RUSH_POLICY);
        Lck_Unlock(&oh->mtx);
        hsh_rush2(wrk, &rush);

        EXP_Insert(wrk, oc);
}

/*---------------------------------------------------------------------
 */
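
/* Insert the worker's preallocated objcore, still marked OC_F_BUSY, on the
 * objhead and return it; the objhead mutex must be held by the caller. */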
static struct objcore *
hsh_insert_busyobj(const struct worker *wrk, struct objhead *oh)
{
        struct objcore *oc;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);

        oc = wrk->wpriv->nobjcore;
        wrk->wpriv->nobjcore = NULL;
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        AN(oc->flags & OC_F_BUSY);
        oc->refcnt = 1;         /* Owned by busyobj */
        oc->objhead = oh;
        VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
        return (oc);
}

/*---------------------------------------------------------------------
 */
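
/*
 * Look up this req's digest in the cache.  On return, *ocp (when set)
 * holds a referenced objcore to deliver or revalidate, and *bocp (when
 * set) holds a freshly inserted busy objcore to fetch into.  HSH_BUSY
 * means the req was parked on the objhead's waiting list and will come
 * back through this lookup later.
 *
 * Sketch of the calling convention (not the actual request-FSM code):
 *
 *    struct objcore *oc, *busy;
 *    enum lookup_e lr = HSH_Lookup(req, &oc, &busy);
 *    if (lr == HSH_BUSY)
 *            return;         // req comes back off the waiting list
 */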
enum lookup_e
HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
{
        struct worker *wrk;
        struct objhead *oh;
        struct objcore *oc;
        struct objcore *exp_oc;
        const struct vcf_return *vr;
        vtim_real exp_t_origin;
        int busy_found;
        const uint8_t *vary;
        intmax_t boc_progress;
        unsigned xid = 0;
        unsigned ban_checks;
        unsigned ban_any_variant;
        float dttl = 0.0;

        AN(ocp);
        *ocp = NULL;
        AN(bocp);
        *bocp = NULL;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        wrk = req->wrk;
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
        CHECK_OBJ_NOTNULL(req->http, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(req->vcf, VCF_MAGIC);
        AN(hash);

        hsh_prealloc(wrk);
        if (DO_DEBUG(DBG_HASHEDGE))
                hsh_testmagic(req->digest);

        if (req->hash_objhead != NULL) {
                /*
                 * This req came off the waiting list, and brings an
                 * oh refcnt with it.
                 */
                CHECK_OBJ_NOTNULL(req->hash_objhead, OBJHEAD_MAGIC);
                oh = req->hash_objhead;
                Lck_Lock(&oh->mtx);
                req->hash_objhead = NULL;
        } else {
                AN(wrk->wpriv->nobjhead);
                oh = hash->lookup(wrk, req->digest, &wrk->wpriv->nobjhead);
        }

        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_AssertHeld(&oh->mtx);

        if (req->hash_always_miss) {
                /* XXX: should we do predictive Vary in this case ? */
                /* Insert new objcore in objecthead and release mutex */
                *bocp = hsh_insert_busyobj(wrk, oh);
                /* NB: no deref of objhead, new object inherits reference */
                Lck_Unlock(&oh->mtx);
                return (HSH_MISS);
        }

        assert(oh->refcnt > 0);
        busy_found = 0;
        exp_oc = NULL;
        exp_t_origin = 0.0;
        ban_checks = 0;
        ban_any_variant = cache_param->ban_any_variant;
        VTAILQ_FOREACH(oc, &oh->objcs, hsh_list) {
                /* Must be at least our own ref + the objcore we examine */
                assert(oh->refcnt > 1);
                CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                assert(oc->objhead == oh);
                assert(oc->refcnt > 0);

                if (oc->flags & OC_F_DYING)
                        continue;
                if (oc->flags & OC_F_FAILED)
                        continue;

                CHECK_OBJ_ORNULL(oc->boc, BOC_MAGIC);
                if (oc->flags & OC_F_BUSY) {
                        if (req->hash_ignore_busy)
                                continue;

                        if (oc->boc && oc->boc->vary != NULL &&
                            !req->hash_ignore_vary &&
                            !VRY_Match(req, oc->boc->vary)) {
                                wrk->strangelove++;
                                continue;
                        }

                        busy_found = 1;
                        continue;
                }

                if (oc->ttl <= 0.)
                        continue;

                if (ban_checks++ < ban_any_variant
                    && BAN_CheckObject(wrk, oc, req)) {
                        oc->flags |= OC_F_DYING;
                        EXP_Remove(oc, NULL);
                        continue;
                }

                if (!req->hash_ignore_vary && ObjHasAttr(wrk, oc, OA_VARY)) {
                        vary = ObjGetAttr(wrk, oc, OA_VARY, NULL);
                        AN(vary);
                        if (!VRY_Match(req, vary)) {
                                wrk->strangelove++;
                                continue;
                        }
                }

                if (ban_checks >= ban_any_variant
                    && BAN_CheckObject(wrk, oc, req)) {
                        oc->flags |= OC_F_DYING;
                        EXP_Remove(oc, NULL);
                        continue;
                }

                if (req->vcf != NULL) {
                        vr = req->vcf->func(req, &oc, &exp_oc, 0);
                        if (vr == VCF_CONTINUE)
                                continue;
                        if (vr == VCF_MISS) {
                                oc = NULL;
                                break;
                        }
                        if (vr == VCF_HIT)
                                break;
                        assert(vr == VCF_DEFAULT);
                }

                if (EXP_Ttl(req, oc) > req->t_req) {
                        assert(oh->refcnt > 1);
                        assert(oc->objhead == oh);
                        break;
                }

                if (EXP_Ttl(NULL, oc) <= req->t_req && /* ignore req.ttl */
                    oc->t_origin > exp_t_origin) {
                        /* record the newest object */
                        exp_oc = oc;
                        exp_t_origin = oc->t_origin;
                        assert(oh->refcnt > 1);
                        assert(exp_oc->objhead == oh);
                }
        }

        if (req->vcf != NULL)
                (void)req->vcf->func(req, &oc, &exp_oc, 1);

        if (oc != NULL && oc->flags & OC_F_HFP) {
                xid = VXID(ObjGetXID(wrk, oc));
                dttl = EXP_Dttl(req, oc);
                AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
                wrk->stats->cache_hitpass++;
                VSLb(req->vsl, SLT_HitPass, "%u %.6f", xid, dttl);
                return (HSH_HITPASS);
        }

        if (oc != NULL) {
                *ocp = oc;
                oc->refcnt++;
                if (oc->flags & OC_F_HFM) {
                        xid = VXID(ObjGetXID(wrk, oc));
                        dttl = EXP_Dttl(req, oc);
                        *bocp = hsh_insert_busyobj(wrk, oh);
                        Lck_Unlock(&oh->mtx);
                        wrk->stats->cache_hitmiss++;
                        VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
                        return (HSH_HITMISS);
                }
                oc->hits++;
                boc_progress = oc->boc == NULL ? -1 : oc->boc->fetched_so_far;
                AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
                Req_LogHit(wrk, req, oc, boc_progress);
                return (HSH_HIT);
        }

        if (exp_oc != NULL && exp_oc->flags & OC_F_HFM) {
                /*
                 * expired HFM ("grace/keep HFM")
                 *
                 * XXX should HFM objects actually have grace/keep ?
                 * XXX also:  why isn't *ocp = exp_oc ?
                 */
                xid = VXID(ObjGetXID(wrk, exp_oc));
                dttl = EXP_Dttl(req, exp_oc);
                *bocp = hsh_insert_busyobj(wrk, oh);
                Lck_Unlock(&oh->mtx);
                wrk->stats->cache_hitmiss++;
                VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
                return (HSH_HITMISS);
        }

        if (exp_oc != NULL && exp_oc->boc != NULL)
                boc_progress = exp_oc->boc->fetched_so_far;
        else
                boc_progress = -1;

        if (!busy_found) {
                *bocp = hsh_insert_busyobj(wrk, oh);

                if (exp_oc != NULL) {
                        exp_oc->refcnt++;
                        *ocp = exp_oc;
                        if (EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
                                exp_oc->hits++;
                                Lck_Unlock(&oh->mtx);
                                Req_LogHit(wrk, req, exp_oc, boc_progress);
                                return (HSH_GRACE);
                        }
                }
                Lck_Unlock(&oh->mtx);
                return (HSH_MISS);
        }

        AN(busy_found);
        if (exp_oc != NULL && EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
                /* we do not wait on the busy object if in grace */
                exp_oc->refcnt++;
                *ocp = exp_oc;
                exp_oc->hits++;
                AN(hsh_deref_objhead_unlock(wrk, &oh, 0));
                Req_LogHit(wrk, req, exp_oc, boc_progress);
                return (HSH_GRACE);
        }

        /* There are one or more busy objects, wait for them */
        VTAILQ_INSERT_TAIL(&oh->waitinglist, req, w_list);

        AZ(req->hash_ignore_busy);

        /*
         * The objhead reference transfers to the req; we get it
         * back when the req comes off the waiting list and
         * calls us again.
         */
        req->hash_objhead = oh;
        req->wrk = NULL;
        req->waitinglist = 1;

        if (DO_DEBUG(DBG_WAITINGLIST))
                VSLb(req->vsl, SLT_Debug, "on waiting list <%p>", oh);

        Lck_Unlock(&oh->mtx);

        wrk->stats->busy_sleep++;
        return (HSH_BUSY);
}

/*---------------------------------------------------------------------
 * Pick the reqs we are going to rush from the waiting list
 */

static void
hsh_rush1(const struct worker *wrk, struct objhead *oh, struct rush *r, int max)
{
        int i;
        struct req *req;

        if (max == 0)
                return;
        if (max == HSH_RUSH_POLICY)
                max = cache_param->rush_exponent;
        assert(max > 0);

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);
        VTAILQ_INIT(&r->reqs);
        Lck_AssertHeld(&oh->mtx);
        for (i = 0; i < max; i++) {
                req = VTAILQ_FIRST(&oh->waitinglist);
                if (req == NULL)
                        break;
                CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
                wrk->stats->busy_wakeup++;
                AZ(req->wrk);
                VTAILQ_REMOVE(&oh->waitinglist, req, w_list);
                VTAILQ_INSERT_TAIL(&r->reqs, req, w_list);
                req->waitinglist = 0;
        }
}

/*---------------------------------------------------------------------
 * Rush reqs that came off the waiting list.
 */

static void
hsh_rush2(struct worker *wrk, struct rush *r)
{
        struct req *req;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);

        while (!VTAILQ_EMPTY(&r->reqs)) {
                req = VTAILQ_FIRST(&r->reqs);
                CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
                VTAILQ_REMOVE(&r->reqs, req, w_list);
                DSL(DBG_WAITINGLIST, req->vsl->wid, "off waiting list");
                if (req->transport->reembark != NULL) {
                        // For ESI includes
                        req->transport->reembark(wrk, req);
                } else {
                        /*
                         * We ignore the queue limits which apply to new
                         * requests because if we fail to reschedule there
                         * may be vmod_privs to clean up and we need a proper
                         * worker thread for that.
                         */
                        AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
                }
        }
}

/*---------------------------------------------------------------------
 * Purge an entire objhead
 */

unsigned
HSH_Purge(struct worker *wrk, struct objhead *oh, vtim_real ttl_now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep)
{
        struct objcore *oc, *oc_nows[2], **ocp;
        unsigned i, j, n, n_max, total = 0;
        int is_purge;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        is_purge = (ttl == 0 && grace == 0 && keep == 0);
        n_max = WS_ReserveLumps(wrk->aws, sizeof *ocp);
        if (n_max < 2) {
                /* No space on the workspace. Give it a stack buffer of 2
                 * elements, which is the minimum for the algorithm
                 * below. */
                ocp = oc_nows;
                n_max = 2;
        } else
                ocp = WS_Reservation(wrk->aws);
        AN(ocp);

        /* Note: This algorithm uses OC references in the list as
         * bookmarks, in order to know how far into the list we were when
         * releasing the mutex partway through and want to resume
         * again. This relies on the list not being reordered while we are
         * not holding the mutex. The only place where that happens is in
         * HSH_Unbusy(), where an OC_F_BUSY OC is moved first in the
         * list. This does not cause problems because we skip OC_F_BUSY
         * OCs. */

        Lck_Lock(&oh->mtx);
        oc = VTAILQ_FIRST(&oh->objcs);
        n = 0;
        while (1) {
                for (; n < n_max && oc != NULL; oc = VTAILQ_NEXT(oc, hsh_list))
                {
                        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                        assert(oc->objhead == oh);
                        if (oc->flags & OC_F_BUSY) {
                                /* We cannot purge busy objects here, because
                                 * their owners have special rights to them,
                                 * and may nuke them without concern for the
                                 * refcount, which by definition always must
                                 * be one, so they don't check. */
                                continue;
                        }
                        if (oc->flags & OC_F_DYING)
                                continue;
                        if (is_purge)
                                oc->flags |= OC_F_DYING;
                        oc->refcnt++;
                        ocp[n++] = oc;
                }

                Lck_Unlock(&oh->mtx);

                if (n == 0) {
                        /* No eligible objcores found. We are finished. */
                        break;
                }

                j = n;
                if (oc != NULL) {
                        /* There are more objects on the objhead that we
                         * have not yet looked at, but no more space on
                         * the objcore reference list. Do not process the
                         * last one, it will be used as the bookmark into
                         * the objcore list for the next iteration of the
                         * outer loop. */
                        j--;
                        assert(j >= 1); /* True because n_max >= 2 */
                }
                for (i = 0; i < j; i++) {
                        CHECK_OBJ_NOTNULL(ocp[i], OBJCORE_MAGIC);
                        if (is_purge)
                                EXP_Remove(ocp[i], NULL);
                        else
                                EXP_Reduce(ocp[i], ttl_now, ttl, grace, keep);
                        (void)HSH_DerefObjCore(wrk, &ocp[i], 0);
                        AZ(ocp[i]);
                        total++;
                }

                if (j == n) {
                        /* No bookmark set, that means we got to the end
                         * of the objcore list in the previous run and are
                         * finished. */
                        break;
                }

                Lck_Lock(&oh->mtx);

                /* Move the bookmark first and continue scanning the
                 * objcores */
                CHECK_OBJ_NOTNULL(ocp[j], OBJCORE_MAGIC);
                ocp[0] = ocp[j];
                n = 1;
                oc = VTAILQ_NEXT(ocp[0], hsh_list);
                CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        }

        WS_Release(wrk->aws, 0);
        if (is_purge)
                Pool_PurgeStat(total);
        return (total);
}

/*---------------------------------------------------------------------
 * Fail an objcore
 */

void
HSH_Fail(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);

        /*
         * We have to have either a busy bit, so that HSH_Lookup
         * will not consider this oc, or an object hung off the oc
         * so that it can consider it.
         */
        assert((oc->flags & OC_F_BUSY) || (oc->stobj->stevedore != NULL));

        Lck_Lock(&oh->mtx);
        oc->flags |= OC_F_FAILED;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Mark a fetch we will not need as cancelled
 */

static void
hsh_cancel(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        oc->flags |= OC_F_CANCEL;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Cancel a fetch when the client does not need it any more
 */

void
HSH_Cancel(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
        struct boc *bocref = NULL;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if ((oc->flags & OC_F_TRANSIENT) == 0)
                return;

        /*
         * NB: we use two distinct variables to only release the reference if
         * we had to acquire one. The caller-provided boc is optional.
         */
        if (boc == NULL)
                bocref = boc = HSH_RefBoc(oc);

        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

        if (oc->flags & OC_F_HFP)
                AN(oc->flags & OC_F_HFM);

        if (boc != NULL) {
                hsh_cancel(oc);
                ObjWaitState(oc, BOS_FINISHED);
        }

        if (bocref != NULL)
                HSH_DerefBoc(wrk, oc);

        ObjSlim(wrk, oc);
}

/*---------------------------------------------------------------------
 * Unbusy an objcore when the object is completely fetched.
 */

void
HSH_Unbusy(struct worker *wrk, struct objcore *oc)
{
        struct objhead *oh;
        struct rush rush;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

        oh = oc->objhead;
        CHECK_OBJ(oh, OBJHEAD_MAGIC);
        INIT_OBJ(&rush, RUSH_MAGIC);

        AN(oc->stobj->stevedore);
        AN(oc->flags & OC_F_BUSY);
        assert(oh->refcnt > 0);
        assert(oc->refcnt > 0);

        if (!(oc->flags & OC_F_PRIVATE)) {
                BAN_NewObjCore(oc);
                AN(oc->ban);
        }

        /* XXX: pretouch neighbors on oh->objcs to prevent page-on under mtx */
        Lck_Lock(&oh->mtx);
        assert(oh->refcnt > 0);
        assert(oc->refcnt > 0);
        if (!(oc->flags & OC_F_PRIVATE))
                EXP_RefNewObjcore(oc); /* Takes a ref for expiry */
        /* XXX: strictly speaking, we should sort in Date: order. */
        VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
        oc->flags &= ~OC_F_BUSY;
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, HSH_RUSH_POLICY);
        }
        Lck_Unlock(&oh->mtx);
        EXP_Insert(wrk, oc); /* Does nothing unless EXP_RefNewObjcore was
                              * called */
        hsh_rush2(wrk, &rush);
}

/*====================================================================
 * HSH_Kill()
 *
 * It's dead Jim, kick it...
 */

void
HSH_Kill(struct objcore *oc)
{

        HSH_Replace(oc, NULL);
}

void
HSH_Replace(struct objcore *oc, const struct objcore *new_oc)
{

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
        if (new_oc != NULL) {
                CHECK_OBJ(new_oc, OBJCORE_MAGIC);
                assert(oc->objhead == new_oc->objhead);
        }

        Lck_Lock(&oc->objhead->mtx);
        oc->flags |= OC_F_DYING;
        Lck_Unlock(&oc->objhead->mtx);
        EXP_Remove(oc, new_oc);
}

/*====================================================================
 * HSH_Snipe()
 *
 * If objcore is idle, gain a ref and mark it dead.
 */

int
HSH_Snipe(const struct worker *wrk, struct objcore *oc)
{
        int retval = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);

        if (oc->refcnt == 1 && !Lck_Trylock(&oc->objhead->mtx)) {
                if (oc->refcnt == 1 && !(oc->flags & OC_F_DYING)) {
                        oc->flags |= OC_F_DYING;
                        oc->refcnt++;
                        retval = 1;
                }
                Lck_Unlock(&oc->objhead->mtx);
        }
        if (retval)
                EXP_Remove(oc, NULL);
        return (retval);
}

/*---------------------------------------------------------------------
 * Gain a reference on an objcore
 */

void
HSH_Ref(struct objcore *oc)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        Lck_Lock(&oh->mtx);
        assert(oc->refcnt > 0);
        oc->refcnt++;
        Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Gain a reference on the busyobj, if the objcore has one
 */

struct boc *
HSH_RefBoc(const struct objcore *oc)
{
        struct objhead *oh;
        struct boc *boc;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
        if (oc->boc == NULL)
                return (NULL);
        Lck_Lock(&oh->mtx);
        assert(oc->refcnt > 0);
        boc = oc->boc;
        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
        if (boc != NULL) {
                assert(boc->refcount > 0);
                if (boc->state < BOS_FINISHED)
                        boc->refcount++;
                else
                        boc = NULL;
        }
        Lck_Unlock(&oh->mtx);
        return (boc);
}

void
HSH_DerefBoc(struct worker *wrk, struct objcore *oc)
{
        struct boc *boc;
        unsigned r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        boc = oc->boc;
        CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
        Lck_Lock(&oc->objhead->mtx);
        assert(oc->refcnt > 0);
        assert(boc->refcount > 0);
        r = --boc->refcount;
        if (r == 0)
                oc->boc = NULL;
        Lck_Unlock(&oc->objhead->mtx);
        if (r == 0)
                ObjBocDone(wrk, oc, &boc);
}

/*--------------------------------------------------------------------
 * Dereference objcore
 *
 * Returns zero if target was destroyed.
 */

int
HSH_DerefObjCore(struct worker *wrk, struct objcore **ocp, int rushmax)
{
        struct objcore *oc;
        struct objhead *oh;
        struct rush rush;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oc, ocp, OBJCORE_MAGIC);
        assert(oc->refcnt > 0);
        INIT_OBJ(&rush, RUSH_MAGIC);

        oh = oc->objhead;
        CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        assert(oh->refcnt > 0);
        r = --oc->refcnt;
        if (!r)
                VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, rushmax);
        }
        Lck_Unlock(&oh->mtx);
        hsh_rush2(wrk, &rush);
        if (r != 0)
                return (r);

        AZ(oc->exp_flags);

        BAN_DestroyObj(oc);
        AZ(oc->ban);

        if (oc->stobj->stevedore != NULL)
                ObjFreeObj(wrk, oc);
        ObjDestroy(wrk, &oc);

        /* Drop our ref on the objhead */
        assert(oh->refcnt > 0);
        (void)hsh_deref_objhead(wrk, &oh);
        return (0);
}
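
/*---------------------------------------------------------------------
 * Drop one reference on an objhead whose mutex is held; the waiting list
 * is rushed as appropriate and the mutex is released in the process.
 */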
static int
hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh, int max)
{
        struct objhead *oh;
        struct rush rush;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

        Lck_AssertHeld(&oh->mtx);

        if (oh == private_oh) {
                assert(VTAILQ_EMPTY(&oh->waitinglist));
                assert(oh->refcnt > 1);
                oh->refcnt--;
                Lck_Unlock(&oh->mtx);
                return (1);
        }

        INIT_OBJ(&rush, RUSH_MAGIC);
        if (!VTAILQ_EMPTY(&oh->waitinglist)) {
                assert(oh->refcnt > 1);
                hsh_rush1(wrk, oh, &rush, max);
        }

        if (oh->refcnt == 1)
                assert(VTAILQ_EMPTY(&oh->waitinglist));

        assert(oh->refcnt > 0);
        r = hash->deref(wrk, oh); /* Unlocks oh->mtx */
        hsh_rush2(wrk, &rush);
        return (r);
}

static int
hsh_deref_objhead(struct worker *wrk, struct objhead **poh)
{
        struct objhead *oh;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

        Lck_Lock(&oh->mtx);
        return (hsh_deref_objhead_unlock(wrk, &oh, 0));
}

void
HSH_Init(const struct hash_slinger *slinger)
{

        assert(DIGEST_LEN == VSHA256_LEN);      /* avoid #include pollution */
        hash = slinger;
        if (hash->start != NULL)
                hash->start();
        private_oh = hsh_newobjhead();
        private_oh->refcnt = 1;
}