/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This is the central hash-table code; it relies on the chosen hash
 * implementation only for the actual hashing, all the housekeeping
 * happens here.
 *
 * We have two kinds of structures, objecthead and object.  An objecthead
 * corresponds to a given (Host:, URL) tuple, and the objects hung from
 * the objecthead may represent variations (ie: Vary: header, different
 * TTL etc) of that web-entity.
 *
 * Each objecthead has a mutex which locks both its own fields, the
 * list of objects and fields in the objects.
 *
 * The hash implementation must supply a reference count facility on
 * the objecthead, and return with a reference held after a lookup.
 *
 * Lookups in the hash implementation return with a ref held, and each
 * object hung from the objhead holds a ref as well.
 *
 * Objects have refcounts which are locked by the objecthead mutex.
 *
 * New objects are always marked busy, and they can go from busy to
 * not busy only once.
 */
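
/*
 * A minimal sketch of the relationship described above (illustrative
 * only; the real struct definitions live elsewhere in the tree and
 * carry many more fields):
 *
 *	objhead (one per hash digest)
 *	    mtx          guards the objhead, its lists, and member ocs
 *	    refcnt       one ref per member objcore, lookup and waiter
 *	    objcs        list of objcore variants (Vary:, TTL, ...)
 *	    waitinglist  reqs parked behind an OC_F_BUSY objcore
 */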

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "cache/cache_objhead.h"
#include "cache/cache_transport.h"

#include "hash/hash_slinger.h"

#include "vsha256.h"

struct rush {
    unsigned            magic;
#define RUSH_MAGIC      0xa1af5f01
    VTAILQ_HEAD(,req)   reqs;
};

static const struct hash_slinger *hash;
#define PRIVATE_OH_EXP 7
static struct objhead private_ohs[1 << PRIVATE_OH_EXP];
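
/*
 * Note (an inference from the code, not a documented contract):
 * 1 << PRIVATE_OH_EXP == 128, so 128 statically allocated objheads
 * serve the private (non-cached) objcores.  HSH_Private() below picks
 * one by Fibonacci-hashing the worker pointer, spreading per-worker
 * private objects across 128 locks instead of serializing on one.
 */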

static void hsh_rush1(const struct worker *, struct objcore *,
    struct rush *);
static void hsh_rush2(struct worker *, struct rush *);
static int hsh_deref_objhead(struct worker *wrk, struct objhead **poh);
static int hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh,
    struct objcore *oc);
static int hsh_deref_objcore_unlock(struct worker *, struct objcore **);

/*---------------------------------------------------------------------*/

#define VCF_RETURN(x) const struct vcf_return VCF_##x[1] = { \
    { .name = #x, } \
};

VCF_RETURNS()
#undef VCF_RETURN

/*---------------------------------------------------------------------*/

static void
hsh_initobjhead(struct objhead *oh)
{

    XXXAN(oh);
    INIT_OBJ(oh, OBJHEAD_MAGIC);
    oh->refcnt = 1;
    oh->waitinglist_gen = 1;
    VTAILQ_INIT(&oh->objcs);
    VTAILQ_INIT(&oh->waitinglist);
    Lck_New(&oh->mtx, lck_objhdr);
}

static struct objhead *
hsh_newobjhead(void)
{
    struct objhead *oh = malloc(sizeof *oh);
    hsh_initobjhead(oh);
    return (oh);
}

/*---------------------------------------------------------------------*/
/* Precreate an objhead and object for later use */
static void
hsh_prealloc(struct worker *wrk)
{

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

    if (wrk->wpriv->nobjcore == NULL)
        wrk->wpriv->nobjcore = ObjNew(wrk);
    CHECK_OBJ_NOTNULL(wrk->wpriv->nobjcore, OBJCORE_MAGIC);

    if (wrk->wpriv->nobjhead == NULL) {
        wrk->wpriv->nobjhead = hsh_newobjhead();
        wrk->stats->n_objecthead++;
    }
    CHECK_OBJ_NOTNULL(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);

    if (hash->prep != NULL)
        hash->prep(wrk);
}

/*---------------------------------------------------------------------*/

// https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
static inline size_t
fib(uint64_t n, uint8_t bits)
{
    const uint64_t gr = 11400714819323198485LLU;
    uint64_t r;

    r = n * gr;
    r >>= (sizeof(gr) * 8) - bits;
    assert(r < (size_t)1 << bits);
    return ((size_t)r);
}
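
/*
 * Worked example (illustrative): gr is 2^64 divided by the golden
 * ratio, so multiplying by it wraps around 2^64 and scatters nearby
 * inputs far apart; keeping only the top `bits` bits of the product
 * yields a well-spread index in [0, 1 << bits).  With
 * bits == PRIVATE_OH_EXP (7), the call reduces to:
 *
 *	fib(n, 7) == (n * 11400714819323198485ULL) >> 57	// 0..127
 */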

struct objcore *
HSH_Private(const struct worker *wrk)
{
    struct objcore *oc;
    struct objhead *oh;

    oh = &private_ohs[fib((uintptr_t)wrk, PRIVATE_OH_EXP)];
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    oc = ObjNew(wrk);
    AN(oc);
    oc->refcnt = 1;
    oc->objhead = oh;
    oc->flags |= OC_F_PRIVATE;
    Lck_Lock(&oh->mtx);
    VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
    oh->refcnt++;
    Lck_Unlock(&oh->mtx);
    return (oc);
}

/*---------------------------------------------------------------------*/

void
HSH_Cleanup(const struct worker *wrk)
{

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
    if (wrk->wpriv->nobjcore != NULL)
        ObjDestroy(wrk, &wrk->wpriv->nobjcore);

    if (wrk->wpriv->nobjhead != NULL) {
        CHECK_OBJ(wrk->wpriv->nobjhead, OBJHEAD_MAGIC);
        Lck_Delete(&wrk->wpriv->nobjhead->mtx);
        FREE_OBJ(wrk->wpriv->nobjhead);
        wrk->stats->n_objecthead--;
    }
    if (wrk->wpriv->nhashpriv != NULL) {
        /* XXX: If needed, add slinger method for this */
        free(wrk->wpriv->nhashpriv);
        wrk->wpriv->nhashpriv = NULL;
    }
}

void
HSH_DeleteObjHead(const struct worker *wrk, struct objhead *oh)
{
    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    AZ(oh->refcnt);
    assert(VTAILQ_EMPTY(&oh->objcs));
    assert(VTAILQ_EMPTY(&oh->waitinglist));
    Lck_Delete(&oh->mtx);
    wrk->stats->n_objecthead--;
    FREE_OBJ(oh);
}

void
HSH_AddString(struct req *req, void *ctx, const char *str)
{

    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    AN(ctx);
    if (str != NULL) {
        VSHA256_Update(ctx, str, strlen(str));
        VSLbs(req->vsl, SLT_Hash, TOSTRAND(str));
    } else
        VSHA256_Update(ctx, &str, 1);
}
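
/*
 * Sketch of intended use (illustrative; the concrete calls live in the
 * VCL glue, and the header shown is just an example input):
 *
 *	HSH_AddString(req, ctx, req->http->hd[HTTP_HDR_URL].b);
 *	HSH_AddString(req, ctx, NULL);	// end-of-field marker
 *
 * The NULL call folds a single byte (zero, on common pointer
 * representations) into the digest as a field separator, so that the
 * inputs ("ab", "c") and ("a", "bc") produce different digests.
 */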

/*---------------------------------------------------------------------
 * This is a debugging hack to enable testing of boundary conditions
 * in the hash algorithm.
 * We trap the first 9 different digests and translate them to different
 * digests with edge bit conditions
 */

static struct hsh_magiclist {
    unsigned char was[VSHA256_LEN];
    unsigned char now[VSHA256_LEN];
} hsh_magiclist[] = {
    { .now = {  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
    { .now = {  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } },
    { .now = {  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 } },
    { .now = {  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40 } },
    { .now = {  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 } },
    { .now = {  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
    { .now = {  0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
    { .now = {  0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
    { .now = {  0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
};

#define HSH_NMAGIC vcountof(hsh_magiclist)

static void
hsh_testmagic(void *result)
{
    size_t i, j;
    static size_t nused = 0;

    for (i = 0; i < nused; i++)
        if (!memcmp(hsh_magiclist[i].was, result, VSHA256_LEN))
            break;
    if (i == nused && i < HSH_NMAGIC)
        memcpy(hsh_magiclist[nused++].was, result, VSHA256_LEN);
    if (i == nused)
        return;
    assert(i < HSH_NMAGIC);
    fprintf(stderr, "HASHMAGIC: <");
    for (j = 0; j < VSHA256_LEN; j++)
        fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
    fprintf(stderr, "> -> <");
    memcpy(result, hsh_magiclist[i].now, VSHA256_LEN);
    for (j = 0; j < VSHA256_LEN; j++)
        fprintf(stderr, "%02x", ((unsigned char*)result)[j]);
    fprintf(stderr, ">\n");
}

/*---------------------------------------------------------------------
 * Insert an object which magically appears out of nowhere or, more likely,
 * comes off some persistent storage device.
 * Insert it with a reference held.
 */

void
HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc,
    struct ban *ban)
{
    struct objhead *oh;
    struct rush rush;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
    AN(digest);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    AN(ban);
    AZ(oc->flags & OC_F_BUSY);
    AZ(oc->flags & OC_F_PRIVATE);
    assert(oc->refcnt == 1);
    INIT_OBJ(&rush, RUSH_MAGIC);

    hsh_prealloc(wrk);

    AN(wrk->wpriv->nobjhead);
    oh = hash->lookup(wrk, digest, &wrk->wpriv->nobjhead);
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    Lck_AssertHeld(&oh->mtx);
    assert(oh->refcnt > 0);

    /* Mark object busy and insert (precreated) objcore in
       objecthead. The new object inherits our objhead reference. */
    oc->objhead = oh;
    VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
    EXP_RefNewObjcore(oc);
    Lck_Unlock(&oh->mtx);

    BAN_RefBan(oc, ban);
    AN(oc->ban);

    /* Move the object first in the oh list, unbusy it and run the
       waitinglist if necessary */
    Lck_Lock(&oh->mtx);
    VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
    VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
    if (!VTAILQ_EMPTY(&oh->waitinglist))
        hsh_rush1(wrk, oc, &rush);
    Lck_Unlock(&oh->mtx);
    hsh_rush2(wrk, &rush);

    EXP_Insert(wrk, oc);
}

/*---------------------------------------------------------------------
 */

static struct objcore *
hsh_insert_busyobj(const struct worker *wrk, struct objhead *oh)
{
    struct objcore *oc;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    Lck_AssertHeld(&oh->mtx);

    oc = wrk->wpriv->nobjcore;
    wrk->wpriv->nobjcore = NULL;
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

    AZ(oc->flags & OC_F_BUSY);
    oc->flags |= OC_F_BUSY;
    oc->refcnt = 1;     /* Owned by busyobj */
    oc->objhead = oh;
    VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
    return (oc);
}

/*---------------------------------------------------------------------
 */

static int
hsh_vry_match(const struct req *req, struct objcore *oc, const uint8_t *vary)
{

    if (req->hash_ignore_vary)
        return (1);
    if (vary == NULL) {
        if (! ObjHasAttr(req->wrk, oc, OA_VARY))
            return (1);
        vary = ObjGetAttr(req->wrk, oc, OA_VARY, NULL);
        AN(vary);
    }
    return (VRY_Match(req, vary));
}

static unsigned
hsh_rush_match(const struct req *req)
{
    struct objhead *oh;
    struct objcore *oc;

    oc = req->objcore;
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    assert(oc->refcnt > 0);

    AZ(oc->flags & OC_F_BUSY);
    AZ(oc->flags & OC_F_PRIVATE);
    if (oc->flags & (OC_F_WITHDRAWN|OC_F_HFM|OC_F_HFP|OC_F_CANCEL|
        OC_F_FAILED))
        return (0);

    if (req->vcf != NULL) /* NB: must operate under oh lock. */
        return (0);

    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    return (hsh_vry_match(req, oc, NULL));
}

/*---------------------------------------------------------------------
 */
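
/*
 * Roadmap of the return paths below:
 *
 *	HSH_HIT     - a usable variant was found, fresh or just
 *	              [re]validated off the waiting list
 *	HSH_HITPASS - a hit-for-pass marker matched; bypass the cache
 *	HSH_HITMISS - a hit-for-miss marker matched; a busy objcore is
 *	              inserted and a fetch will follow
 *	HSH_GRACE   - an expired variant within grace is served; unless
 *	              a fetch is already underway, a busy objcore is
 *	              inserted to revalidate in the background
 *	HSH_MISS    - nothing matched; a busy objcore is inserted
 *	HSH_BUSY    - a matching fetch is in flight; the req is parked
 *	              on the objhead's waiting list
 */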

enum lookup_e
HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
{
    struct worker *wrk;
    struct objhead *oh;
    struct objcore *oc;
    struct objcore *exp_oc;
    const struct vcf_return *vr;
    vtim_real exp_t_origin;
    int busy_found;
    intmax_t boc_progress;
    unsigned xid = 0;
    unsigned ban_checks;
    unsigned ban_any_variant;
    float dttl = 0.0;

    AN(ocp);
    *ocp = NULL;
    AN(bocp);
    *bocp = NULL;

    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    wrk = req->wrk;
    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(wrk->wpriv, WORKER_PRIV_MAGIC);
    CHECK_OBJ_NOTNULL(req->http, HTTP_MAGIC);
    CHECK_OBJ_ORNULL(req->objcore, OBJCORE_MAGIC);
    CHECK_OBJ_ORNULL(req->vcf, VCF_MAGIC);
    AN(hash);

    hsh_prealloc(wrk);
    if (DO_DEBUG(DBG_HASHEDGE))
        hsh_testmagic(req->digest);

    /*
     * When a req rushes off the waiting list, it brings an implicit
     * oh refcnt acquired at disembark time and an oc ref (with its
     * own distinct oh ref) acquired during rush hour.
     */
    if (req->objcore != NULL && hsh_rush_match(req)) {
        TAKE_OBJ_NOTNULL(oc, &req->objcore, OBJCORE_MAGIC);
        *ocp = oc;
        oh = oc->objhead;
        Lck_Lock(&oh->mtx);
        oc->hits++;
        boc_progress = oc->boc == NULL ? -1 : oc->boc->fetched_so_far;
        AN(hsh_deref_objhead_unlock(wrk, &oh, oc));
        Req_LogHit(wrk, req, oc, boc_progress);
        /* NB: since this hit comes from the waiting list instead of
         * a regular lookup, grace is not considered. The object is
         * fresh in the context of the waiting list, even expired: it
         * was successfully just [re]validated by a fetch task.
         */
        return (HSH_HIT);
    }

    if (req->objcore != NULL) {
        oh = req->objcore->objhead;
        (void)HSH_DerefObjCore(wrk, &req->objcore);
        Lck_Lock(&oh->mtx);
    } else {
        AN(wrk->wpriv->nobjhead);
        oh = hash->lookup(wrk, req->digest, &wrk->wpriv->nobjhead);
    }

    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    Lck_AssertHeld(&oh->mtx);

    if (req->hash_always_miss) {
        /* XXX: should we do predictive Vary in this case ? */
        /* Insert new objcore in objecthead and release mutex */
        *bocp = hsh_insert_busyobj(wrk, oh);
        /* NB: no deref of objhead, new object inherits reference */
        Lck_Unlock(&oh->mtx);
        return (HSH_MISS);
    }

    assert(oh->refcnt > 0);
    busy_found = 0;
    exp_oc = NULL;
    exp_t_origin = 0.0;
    ban_checks = 0;
    ban_any_variant = cache_param->ban_any_variant;
    VTAILQ_FOREACH(oc, &oh->objcs, hsh_list) {
        /* Must be at least our own ref + the objcore we examine */
        assert(oh->refcnt > 1);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        assert(oc->objhead == oh);
        assert(oc->refcnt > 0);

        if (oc->flags & OC_F_DYING)
            continue;
        if (oc->flags & OC_F_FAILED)
            continue;

        CHECK_OBJ_ORNULL(oc->boc, BOC_MAGIC);
        if (oc->flags & OC_F_BUSY) {
            if (req->hash_ignore_busy)
                continue;

            if (oc->boc && oc->boc->vary != NULL &&
                !hsh_vry_match(req, oc, oc->boc->vary)) {
                wrk->strangelove++;
                continue;
            }

            busy_found = 1;
            continue;
        }

        if (oc->ttl <= 0.)
            continue;

        if (ban_checks++ < ban_any_variant
            && BAN_CheckObject(wrk, oc, req)) {
            oc->flags |= OC_F_DYING;
            EXP_Remove(oc, NULL);
            continue;
        }

        if (!hsh_vry_match(req, oc, NULL)) {
            wrk->strangelove++;
            continue;
        }

        if (ban_checks >= ban_any_variant
            && BAN_CheckObject(wrk, oc, req)) {
            oc->flags |= OC_F_DYING;
            EXP_Remove(oc, NULL);
            continue;
        }

        if (req->vcf != NULL) {
            vr = req->vcf->func(req, &oc, &exp_oc, 0);
            if (vr == VCF_CONTINUE)
                continue;
            if (vr == VCF_MISS) {
                oc = NULL;
                break;
            }
            if (vr == VCF_HIT)
                break;
            assert(vr == VCF_DEFAULT);
        }

        if (EXP_Ttl(req, oc) > req->t_req) {
            assert(oh->refcnt > 1);
            assert(oc->objhead == oh);
            break;
        }

        if (EXP_Ttl(NULL, oc) <= req->t_req && /* ignore req.max_age */
            oc->t_origin > exp_t_origin) {
            /* record the newest object */
            exp_oc = oc;
            exp_t_origin = oc->t_origin;
            assert(oh->refcnt > 1);
            assert(exp_oc->objhead == oh);
        }
    }

    if (req->vcf != NULL)
        (void)req->vcf->func(req, &oc, &exp_oc, 1);

    if (oc != NULL && oc->flags & OC_F_HFP) {
        xid = VXID(ObjGetXID(wrk, oc));
        dttl = EXP_Dttl(req, oc);
        AN(hsh_deref_objhead_unlock(wrk, &oh, oc));
        wrk->stats->cache_hitpass++;
        VSLb(req->vsl, SLT_HitPass, "%u %.6f", xid, dttl);
        return (HSH_HITPASS);
    }

    if (oc != NULL) {
        *ocp = oc;
        oc->refcnt++;
        if (oc->flags & OC_F_HFM) {
            xid = VXID(ObjGetXID(wrk, oc));
            dttl = EXP_Dttl(req, oc);
            *bocp = hsh_insert_busyobj(wrk, oh);
            Lck_Unlock(&oh->mtx);
            wrk->stats->cache_hitmiss++;
            VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
            return (HSH_HITMISS);
        }
        oc->hits++;
        boc_progress = oc->boc == NULL ? -1 : oc->boc->fetched_so_far;
        AN(hsh_deref_objhead_unlock(wrk, &oh, oc));
        Req_LogHit(wrk, req, oc, boc_progress);
        return (HSH_HIT);
    }

    if (exp_oc != NULL && exp_oc->flags & OC_F_HFM) {
        /*
         * expired HFM ("grace/keep HFM")
         *
         * XXX should HFM objects actually have grace/keep ?
         * XXX also: why isn't *ocp = exp_oc ?
         */
        xid = VXID(ObjGetXID(wrk, exp_oc));
        dttl = EXP_Dttl(req, exp_oc);
        *bocp = hsh_insert_busyobj(wrk, oh);
        Lck_Unlock(&oh->mtx);
        wrk->stats->cache_hitmiss++;
        VSLb(req->vsl, SLT_HitMiss, "%u %.6f", xid, dttl);
        return (HSH_HITMISS);
    }

    if (exp_oc != NULL && exp_oc->boc != NULL)
        boc_progress = exp_oc->boc->fetched_so_far;
    else
        boc_progress = -1;

    if (!busy_found) {
        *bocp = hsh_insert_busyobj(wrk, oh);

        if (exp_oc != NULL) {
            exp_oc->refcnt++;
            *ocp = exp_oc;
            if (EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
                exp_oc->hits++;
                Lck_Unlock(&oh->mtx);
                Req_LogHit(wrk, req, exp_oc, boc_progress);
                return (HSH_GRACE);
            }
        }
        Lck_Unlock(&oh->mtx);
        return (HSH_MISS);
    }

    AN(busy_found);
    if (exp_oc != NULL && EXP_Ttl_grace(req, exp_oc) >= req->t_req) {
        /* we do not wait on the busy object if in grace */
        exp_oc->refcnt++;
        *ocp = exp_oc;
        exp_oc->hits++;
        AN(hsh_deref_objhead_unlock(wrk, &oh, NULL));
        Req_LogHit(wrk, req, exp_oc, boc_progress);
        return (HSH_GRACE);
    }

    /* There are one or more busy objects, wait for them */
    VTAILQ_INSERT_TAIL(&oh->waitinglist, req, w_list);

    AZ(req->hash_ignore_busy);

    /*
     * The objhead reference is held by req while it is parked on the
     * waiting list. The oh pointer is taken back from the objcore that
     * triggers a rush of req off the waiting list.
     */
    assert(oh->refcnt > 1);

    req->wrk = NULL;
    req->waitinglist_gen = oh->waitinglist_gen;

    if (DO_DEBUG(DBG_WAITINGLIST))
        VSLb(req->vsl, SLT_Debug, "on waiting list <%p>", oh);

    Lck_Unlock(&oh->mtx);

    wrk->stats->busy_sleep++;
    return (HSH_BUSY);
}

/*---------------------------------------------------------------------
 * Pick the req's we are going to rush from the waiting list
 */

static void
hsh_rush1(const struct worker *wrk, struct objcore *oc, struct rush *r)
{
    struct objhead *oh;
    struct req *req;
    int i, max;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);
    VTAILQ_INIT(&r->reqs);

    if (oc == NULL)
        return;

    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    Lck_AssertHeld(&oh->mtx);

    AZ(oc->flags & OC_F_BUSY);
    AZ(oc->flags & OC_F_PRIVATE);
    max = cache_param->rush_exponent;
    if (oc->flags & (OC_F_WITHDRAWN|OC_F_FAILED))
        max = 1;
    assert(max > 0);

    if (oc->waitinglist_gen == 0) {
        oc->waitinglist_gen = oh->waitinglist_gen;
        oh->waitinglist_gen++;
    }

    for (i = 0; i < max; i++) {
        req = VTAILQ_FIRST(&oh->waitinglist);
        if (req == NULL)
            break;

        CHECK_OBJ(req, REQ_MAGIC);

        /* NB: The waiting list is naturally sorted by generation.
         *
         * Because of the exponential nature of the rush, it is
         * possible that new requests enter the waiting list before
         * the rush for this oc completes. Because the OC_F_BUSY flag
         * was cleared before the beginning of the rush, requests
         * from a newer generation already got a chance to evaluate
         * oc during a lookup and it didn't match their criteria.
         *
         * Therefore there's no point propagating the exponential
         * rush of this oc when we see a newer generation.
         */
        if (req->waitinglist_gen > oc->waitinglist_gen)
            break;

        AZ(req->wrk);
        VTAILQ_REMOVE(&oh->waitinglist, req, w_list);
        VTAILQ_INSERT_TAIL(&r->reqs, req, w_list);
        req->objcore = oc;
        oc->refcnt++;
        wrk->stats->busy_wakeup++;
    }
}

/*---------------------------------------------------------------------
 * Rush req's that came from waiting list.
 */

static void
hsh_rush2(struct worker *wrk, struct rush *r)
{
    struct req *req;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(r, RUSH_MAGIC);

    while (!VTAILQ_EMPTY(&r->reqs)) {
        req = VTAILQ_FIRST(&r->reqs);
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        VTAILQ_REMOVE(&r->reqs, req, w_list);
        DSL(DBG_WAITINGLIST, req->vsl->wid, "off waiting list");
        if (req->transport->reembark != NULL) {
            // For ESI includes
            req->transport->reembark(wrk, req);
        } else {
            /*
             * We ignore the queue limits which apply to new
             * requests because if we fail to reschedule there
             * may be vmod_privs to cleanup and we need a proper
             * workerthread for that.
             */
            AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
        }
    }
}
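
/*
 * Design note (restating what the two functions above do): the rush is
 * deliberately split so that the objhead mutex is never held while
 * rescheduling.  hsh_rush1() runs under oh->mtx and only moves up to
 * `max` reqs onto a private rush list, granting each one an oc ref;
 * hsh_rush2() then runs with the mutex released and hands the reqs to
 * their transport or to a worker pool.
 */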

/*---------------------------------------------------------------------
 * Purge an entire objhead
 */

unsigned
HSH_Purge(struct worker *wrk, struct objhead *oh, vtim_real ttl_now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep)
{
    struct objcore *oc, *oc_nows[2], **ocp;
    unsigned i, j, n, n_max, total = 0;
    int is_purge;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    is_purge = (ttl == 0 && grace == 0 && keep == 0);
    n_max = WS_ReserveLumps(wrk->aws, sizeof *ocp);
    if (n_max < 2) {
        /* No space on the workspace. Give it a stack buffer of 2
         * elements, which is the minimum for the algorithm
         * below. */
        ocp = oc_nows;
        n_max = 2;
    } else
        ocp = WS_Reservation(wrk->aws);
    AN(ocp);

    /* Note: This algorithm uses OC references in the list as
     * bookmarks, in order to know how far into the list we were when
     * releasing the mutex partway through and want to resume
     * again. This relies on the list not being reordered while we are
     * not holding the mutex. The only place where that happens is in
     * HSH_Unbusy(), where an OC_F_BUSY OC is moved first in the
     * list. This does not cause problems because we skip OC_F_BUSY
     * OCs. */

    Lck_Lock(&oh->mtx);
    oc = VTAILQ_FIRST(&oh->objcs);
    n = 0;
    while (1) {
        for (; n < n_max && oc != NULL; oc = VTAILQ_NEXT(oc, hsh_list))
        {
            CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
            assert(oc->objhead == oh);
            if (oc->flags & OC_F_BUSY) {
                /* We cannot purge busy objects here, because
                 * their owners have special rights to them,
                 * and may nuke them without concern for the
                 * refcount, which by definition always must
                 * be one, so they don't check. */
                continue;
            }
            if (oc->flags & OC_F_DYING)
                continue;
            if (is_purge)
                oc->flags |= OC_F_DYING;
            oc->refcnt++;
            ocp[n++] = oc;
        }

        Lck_Unlock(&oh->mtx);

        if (n == 0) {
            /* No eligible objcores found. We are finished. */
            break;
        }

        j = n;
        if (oc != NULL) {
            /* There are more objects on the objhead that we
             * have not yet looked at, but no more space on
             * the objcore reference list. Do not process the
             * last one, it will be used as the bookmark into
             * the objcore list for the next iteration of the
             * outer loop. */
            j--;
            assert(j >= 1); /* True because n_max >= 2 */
        }
        for (i = 0; i < j; i++) {
            CHECK_OBJ_NOTNULL(ocp[i], OBJCORE_MAGIC);
            if (is_purge)
                EXP_Remove(ocp[i], NULL);
            else
                EXP_Reduce(ocp[i], ttl_now, ttl, grace, keep);
            (void)HSH_DerefObjCore(wrk, &ocp[i]);
            AZ(ocp[i]);
            total++;
        }

        if (j == n) {
            /* No bookmark set, that means we got to the end
             * of the objcore list in the previous run and are
             * finished. */
            break;
        }

        Lck_Lock(&oh->mtx);

        /* Move the bookmark first and continue scanning the
         * objcores */
        CHECK_OBJ_NOTNULL(ocp[j], OBJCORE_MAGIC);
        ocp[0] = ocp[j];
        n = 1;
        oc = VTAILQ_NEXT(ocp[0], hsh_list);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
    }

    WS_Release(wrk->aws, 0);
    if (is_purge)
        Pool_PurgeStat(total);
    return (total);
}
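
/*
 * Usage note (follows from the is_purge test above): a call with
 * ttl == grace == keep == 0 is a hard purge, marking every eligible
 * variant OC_F_DYING and removing it via EXP_Remove(); any nonzero
 * duration is a "soft purge" which merely caps the variants'
 * remaining ttl/grace/keep through EXP_Reduce().
 */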

/*---------------------------------------------------------------------
 * Fail an objcore
 */

void
HSH_Fail(struct worker *wrk, struct objcore *oc)
{
    struct objhead *oh;
    struct rush rush;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    INIT_OBJ(&rush, RUSH_MAGIC);

    /*
     * We either failed before the end of vcl_backend_response
     * and a cache miss has the busy bit, so that HSH_Lookup()
     * will not consider this oc, or an object hung off the oc
     * so that it can consider it.
     *
     * We can only fail an ongoing fetch in a backend context
     * so we can safely check the BOC state as it won't change
     * under our feet.
     */
    if (oc->boc->state < BOS_STREAM)
        assert(oc->flags & (OC_F_BUSY|OC_F_PRIVATE));
    else
        assert(oc->stobj->stevedore != NULL);

    Lck_Lock(&oh->mtx);
    oc->flags |= OC_F_FAILED;
    if (oc->flags & OC_F_BUSY) {
        oc->flags &= ~OC_F_BUSY;
        hsh_rush1(wrk, oc, &rush);
    }
    Lck_Unlock(&oh->mtx);
    hsh_rush2(wrk, &rush);
}

/*---------------------------------------------------------------------
 * Mark a fetch we will not need as cancelled
 */

static void
hsh_cancel(struct objcore *oc)
{
    struct objhead *oh;

    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    oh = oc->objhead;
    CHECK_OBJ(oh, OBJHEAD_MAGIC);

    Lck_Lock(&oh->mtx);
    oc->flags |= OC_F_CANCEL;
    Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Cancel a fetch when the client does not need it any more
 */

void
HSH_Cancel(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
    struct boc *bocref = NULL;

    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

    if ((oc->flags & OC_F_TRANSIENT) == 0)
        return;

    /*
     * NB: we use two distinct variables to only release the reference if
     * we had to acquire one. The caller-provided boc is optional.
     */
    if (boc == NULL)
        bocref = boc = HSH_RefBoc(oc);

    CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

    if (oc->flags & OC_F_HFP)
        AN(oc->flags & OC_F_HFM);

    if (boc != NULL) {
        hsh_cancel(oc);
        ObjWaitState(oc, BOS_FINISHED);
    }

    if (bocref != NULL)
        HSH_DerefBoc(wrk, oc);

    ObjSlim(wrk, oc);
}

/*---------------------------------------------------------------------
 * Withdraw an objcore that will not proceed with a fetch.
 */

void
HSH_Withdraw(struct worker *wrk, struct objcore **ocp)
{
    struct objhead *oh;
    struct objcore *oc;
    struct rush rush;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    TAKE_OBJ_NOTNULL(oc, ocp, OBJCORE_MAGIC);
    INIT_OBJ(&rush, RUSH_MAGIC);

    oh = oc->objhead;
    CHECK_OBJ(oh, OBJHEAD_MAGIC);

    Lck_Lock(&oh->mtx);
    AZ(oc->stobj->stevedore);
    AN(oc->flags & OC_F_BUSY);
    assert(oc->refcnt == 1);
    assert(oh->refcnt > 0);
    oc->flags &= ~OC_F_BUSY;
    oc->flags |= OC_F_WITHDRAWN;
    hsh_rush1(wrk, oc, &rush);      /* grabs up to 1 oc ref */
    assert(hsh_deref_objcore_unlock(wrk, &oc) <= 1);

    hsh_rush2(wrk, &rush);
}

/*---------------------------------------------------------------------
 * Unbusy an objcore when the object is completely fetched.
 */

void
HSH_Unbusy(struct worker *wrk, struct objcore *oc)
{
    struct objhead *oh;
    struct rush rush;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

    oh = oc->objhead;
    CHECK_OBJ(oh, OBJHEAD_MAGIC);

    AN(oc->stobj->stevedore);
    assert(oh->refcnt > 0);
    assert(oc->refcnt > 0);

    if (oc->flags & OC_F_PRIVATE) {
        AZ(oc->flags & OC_F_BUSY);
        return;
    }

    AN(oc->flags & OC_F_BUSY);
    INIT_OBJ(&rush, RUSH_MAGIC);

    BAN_NewObjCore(oc);
    AN(oc->ban);

    /* XXX: pretouch neighbors on oh->objcs to prevent page-on under mtx */
    Lck_Lock(&oh->mtx);
    assert(oh->refcnt > 0);
    assert(oc->refcnt > 0);
    EXP_RefNewObjcore(oc);  /* Takes a ref for expiry */
    /* XXX: strictly speaking, we should sort in Date: order. */
    VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
    VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
    oc->flags &= ~OC_F_BUSY;
    if (!VTAILQ_EMPTY(&oh->waitinglist)) {
        assert(oh->refcnt > 1);
        hsh_rush1(wrk, oc, &rush);
    }
    Lck_Unlock(&oh->mtx);
    EXP_Insert(wrk, oc);
    hsh_rush2(wrk, &rush);
}

/*====================================================================
 * HSH_Kill()
 *
 * It's dead Jim, kick it...
 */

void
HSH_Kill(struct objcore *oc)
{

    HSH_Replace(oc, NULL);
}

void
HSH_Replace(struct objcore *oc, const struct objcore *new_oc)
{

    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
    if (new_oc != NULL) {
        CHECK_OBJ(new_oc, OBJCORE_MAGIC);
        assert(oc->objhead == new_oc->objhead);
    }

    Lck_Lock(&oc->objhead->mtx);
    oc->flags |= OC_F_DYING;
    Lck_Unlock(&oc->objhead->mtx);
    EXP_Remove(oc, new_oc);
}

/*====================================================================
 * HSH_Snipe()
 *
 * If objcore is idle, gain a ref and mark it dead.
 */

int
HSH_Snipe(const struct worker *wrk, struct objcore *oc)
{
    int retval = 0;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);

    if (oc->refcnt == 1 && !Lck_Trylock(&oc->objhead->mtx)) {
        if (oc->refcnt == 1 && !(oc->flags & OC_F_DYING)) {
            oc->flags |= OC_F_DYING;
            oc->refcnt++;
            retval = 1;
        }
        Lck_Unlock(&oc->objhead->mtx);
    }
    if (retval)
        EXP_Remove(oc, NULL);
    return (retval);
}

/*---------------------------------------------------------------------
 * Gain a reference on an objcore
 */

void
HSH_Ref(struct objcore *oc)
{
    struct objhead *oh;

    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    Lck_Lock(&oh->mtx);
    assert(oc->refcnt > 0);
    oc->refcnt++;
    Lck_Unlock(&oh->mtx);
}

/*---------------------------------------------------------------------
 * Gain a reference on the busyobj, if the objcore has one
 */

struct boc *
HSH_RefBoc(const struct objcore *oc)
{
    struct objhead *oh;
    struct boc *boc;

    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
    if (oc->boc == NULL)
        return (NULL);
    Lck_Lock(&oh->mtx);
    assert(oc->refcnt > 0);
    boc = oc->boc;
    CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
    if (boc != NULL) {
        assert(boc->refcount > 0);
        if (boc->state < BOS_FINISHED)
            boc->refcount++;
        else
            boc = NULL;
    }
    Lck_Unlock(&oh->mtx);
    return (boc);
}

void
HSH_DerefBoc(struct worker *wrk, struct objcore *oc)
{
    struct boc *boc;
    unsigned r;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    boc = oc->boc;
    CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
    Lck_Lock(&oc->objhead->mtx);
    assert(oc->refcnt > 0);
    assert(boc->refcount > 0);
    r = --boc->refcount;
    if (r == 0)
        oc->boc = NULL;
    Lck_Unlock(&oc->objhead->mtx);
    if (r == 0)
        ObjBocDone(wrk, oc, &boc);
}

/*--------------------------------------------------------------------
 * Dereference objcore
 *
 * Returns zero if target was destroyed.
 */

int
HSH_DerefObjCore(struct worker *wrk, struct objcore **ocp)
{
    struct objcore *oc;
    struct objhead *oh;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    TAKE_OBJ_NOTNULL(oc, ocp, OBJCORE_MAGIC);
    assert(oc->refcnt > 0);

    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    Lck_Lock(&oh->mtx);
    return (hsh_deref_objcore_unlock(wrk, &oc));
}

static int
hsh_deref_objcore_unlock(struct worker *wrk, struct objcore **ocp)
{
    struct objcore *oc;
    struct objhead *oh;
    int r;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    TAKE_OBJ_NOTNULL(oc, ocp, OBJCORE_MAGIC);
    assert(oc->refcnt > 0);

    oh = oc->objhead;
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    Lck_AssertHeld(&oh->mtx);
    assert(oh->refcnt > 0);
    r = --oc->refcnt;
    if (!r)
        VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
    Lck_Unlock(&oh->mtx);
    if (r != 0)
        return (r);

    AZ(oc->flags & OC_F_BUSY);
    AZ(oc->exp_flags);

    BAN_DestroyObj(oc);
    AZ(oc->ban);

    if (oc->stobj->stevedore != NULL)
        ObjFreeObj(wrk, oc);
    ObjDestroy(wrk, &oc);

    /* Drop our ref on the objhead */
    assert(oh->refcnt > 0);
    (void)hsh_deref_objhead(wrk, &oh);
    return (0);
}

static int
hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh,
    struct objcore *oc)
{
    struct objhead *oh;
    struct rush rush;
    int r;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

    Lck_AssertHeld(&oh->mtx);

    if (oh >= private_ohs && oh < private_ohs + vcountof(private_ohs)) {
        assert(VTAILQ_EMPTY(&oh->waitinglist));
        assert(oh->refcnt > 1);
        oh->refcnt--;
        Lck_Unlock(&oh->mtx);
        return (1);
    }

    //lint --e{661}
    //lint -specific(-e661)
    //
    // Because of the static array, flexelint thinks that all ohs come
    // from the static array. The suppression above applies to the
    // remainder of this function body and to specific walks involving
    // this function.

    INIT_OBJ(&rush, RUSH_MAGIC);
    if (!VTAILQ_EMPTY(&oh->waitinglist)) {
        assert(oh->refcnt > 1);
        hsh_rush1(wrk, oc, &rush);
    }

    if (oh->refcnt == 1)
        assert(VTAILQ_EMPTY(&oh->waitinglist));

    assert(oh->refcnt > 0);
    r = hash->deref(wrk, oh);   /* Unlocks oh->mtx */
    hsh_rush2(wrk, &rush);
    return (r);
}

static int
hsh_deref_objhead(struct worker *wrk, struct objhead **poh)
{
    struct objhead *oh;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);

    Lck_Lock(&oh->mtx);
    return (hsh_deref_objhead_unlock(wrk, &oh, NULL));
}

void
HSH_Init(const struct hash_slinger *slinger)
{

    assert(DIGEST_LEN == VSHA256_LEN);  /* avoid #include pollution */
    hash = slinger;
    if (hash->start != NULL)
        hash->start();
    for (struct objhead *oh = private_ohs;
        oh < private_ohs + vcountof(private_ohs);
        oh++) {
        hsh_initobjhead(oh);
        assert(oh->refcnt == 1);
    }
}