/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
|
|
| 32 |
|
#include "config.h" |
| 33 |
|
|
| 34 |
|
#include <stdio.h> |
| 35 |
|
#include <stdlib.h> |
| 36 |
|
|
| 37 |
|
#include "cache_varnishd.h" |
| 38 |
|
|
| 39 |
|
#include "vcl.h" |
| 40 |
|
#include "vtim.h" |
| 41 |
|
#include "vbm.h" |
| 42 |
|
|
| 43 |
|
#include "cache_director.h" |
| 44 |
|
#include "cache_transport.h" |
| 45 |
|
#include "cache_vcl.h" |
| 46 |
|
#include "vcc_interface.h" |
| 47 |
|
|
| 48 |
|
/*--------------------------------------------------------------------*/ |
| 49 |
|
|
| 50 |
|
/*
 * Map a VCL_RET_* action value to its lowercase VCL source spelling
 * ("deliver", "fail", ...), as listed in tbl/vcl_returns.h.
 * Returns NULL for values not in the table.
 */
const char *
VCL_Return_Name(unsigned r)
{

	switch (r) {
#define VCL_RET_MAC(l, U, B)	\
	case VCL_RET_##U:	\
		return(#l);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
| 63 |
|
|
| 64 |
|
/*
 * Map a VCL_MET_* method bit to its uppercase name ("RECV", "DELIVER",
 * ...), as generated from tbl/vcl_returns.h.  Returns NULL for values
 * not in the table.
 */
const char *
VCL_Method_Name(unsigned m)
{

	switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)	\
	case VCL_MET_##upper:			\
		return (#upper);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
| 77 |
|
|
| 78 |
|
/*--------------------------------------------------------------------*/ |
| 79 |
|
|
| 80 |
|
void |
| 81 |
136033 |
VCL_Refresh(struct vcl **vcc) |
| 82 |
|
{ |
| 83 |
|
|
| 84 |
136033 |
while (vcl_active == NULL) |
| 85 |
0 |
VTIM_sleep(0.1); |
| 86 |
|
|
| 87 |
136033 |
ASSERT_VCL_ACTIVE(); |
| 88 |
136019 |
if (*vcc == vcl_active) |
| 89 |
78453 |
return; |
| 90 |
|
|
| 91 |
57566 |
VCL_Update(vcc, NULL); |
| 92 |
136019 |
} |
| 93 |
|
|
| 94 |
|
/*
 * Opportunistically stash the caller's VCL reference in the worker's
 * private cache so later work on this worker can reuse it without
 * taking vcl_mtx.  *vclp is consumed either way: released when not
 * worth caching, otherwise moved into wrk->wpriv->vcl and NULLed.
 */
void
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
{

	AN(wrk);
	AN(vclp);
	CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
	ASSERT_VCL_ACTIVE();

	/* Not worth caching: the reference is stale, or the worker
	 * already caches the active VCL. */
	if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
		VCL_Rel(vclp);
		return;
	}
	/* Drop any previously cached (now stale) reference first. */
	if (wrk->wpriv->vcl != NULL)
		VCL_Rel(&wrk->wpriv->vcl);
	wrk->wpriv->vcl = *vclp;
	*vclp = NULL;
}
| 112 |
|
|
| 113 |
|
/*
 * Take an additional busy reference on a VCL which must already be
 * referenced (busy > 0) and must not be cold.
 */
void
VCL_Ref(struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(!vcl->temp->is_cold);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy++;
	Lck_Unlock(&vcl_mtx);
}
| 124 |
|
|
| 125 |
|
/*
 * Release a busy reference on *vcc; the pointer is consumed (NULLed).
 */
void
VCL_Rel(struct vcl **vcc)
{
	struct vcl *vcl;

	TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy--;
	/*
	 * We do not garbage collect discarded VCL's here, that happens
	 * in VCL_Poll() which is called from the CLI thread.
	 */
	Lck_Unlock(&vcl_mtx);
}
| 140 |
|
|
| 141 |
|
/*--------------------------------------------------------------------*/ |
| 142 |
|
|
| 143 |
|
/*
 * Final teardown of a director whose reference count has reached zero:
 * delete its lock, then free the CLI name and both objects.
 */
static void
vcldir_free(struct vcldir *vdir)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
	AZ(vdir->refcnt);
	Lck_Delete(&vdir->dlck);
	free(vdir->cli_name);
	FREE_OBJ(vdir->dir);
	FREE_OBJ(vdir);
}
| 155 |
|
|
| 156 |
|
static VCL_BACKEND |
| 157 |
0 |
vcldir_surplus(struct vcldir *vdir) |
| 158 |
|
{ |
| 159 |
|
|
| 160 |
0 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
| 161 |
0 |
assert(vdir->refcnt == 1); |
| 162 |
0 |
vdir->refcnt = 0; |
| 163 |
0 |
vcldir_free(vdir); |
| 164 |
0 |
return (NULL); |
| 165 |
|
} |
| 166 |
|
|
| 167 |
|
/*
 * Create a dynamic director/backend on behalf of a VMOD.
 *
 * The director's CLI name is "<vclname>.<fmt expansion>"; its vcl_name
 * points just past the dot.  Returns NULL when the VCL is cooling, in
 * which case the director must not come into existence; any partially
 * built director is undone via vcldir_surplus().
 */
VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
	struct vsb *vsb;
	struct vcl *vcl;
	struct vcldir *vdir;
	const struct vcltemp *temp;
	va_list ap;
	int i;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
	AN(fmt);
	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	// opportunistic, re-checked again under lock
	if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
		return (NULL);

	ALLOC_OBJ(vdir, VCLDIR_MAGIC);
	AN(vdir);
	ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
	AN(vdir->dir);
	vdir->dir->vdir = vdir;

	vdir->methods = m;
	vdir->dir->priv = priv;
	/* Build "<vcl>.<name>"; remember where the dot ends so vcl_name
	 * can alias into cli_name. */
	vsb = VSB_new_auto();
	AN(vsb);
	VSB_printf(vsb, "%s.", VCL_Name(vcl));
	i = VSB_len(vsb);
	va_start(ap, fmt);
	VSB_vprintf(vsb, fmt, ap);
	va_end(ap);
	AZ(VSB_finish(vsb));
	REPLACE(vdir->cli_name, VSB_data(vsb));
	VSB_destroy(&vsb);
	vdir->dir->vcl_name = vdir->cli_name + i;

	vdir->vcl = vcl;
	vdir->admin_health = VDI_AH_AUTO;
	vdir->health_changed = VTIM_real();

	/* Creation reference, dropped by VRT_DelDirector(). */
	vdir->refcnt++;
	Lck_New(&vdir->dlck, lck_director);
	vdir->dir->mtx = &vdir->dlck;

	/* NB: at this point we look at the VCL temperature after getting
	 * through the trouble of creating the director even though it might
	 * not be legal to do so. Because we change the VCL temperature before
	 * sending COLD events we have to tolerate and undo attempts for the
	 * COOLING case.
	 *
	 * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
	 * if the vcl is busy (ref vcl_set_state())
	 */

	while (1) {
		temp = vcl->temp;
		if (temp == VCL_TEMP_COOLING)
			return (vcldir_surplus(vdir));
		if (vcl->busy == 0 && vcl->temp->is_warm) {
			/* Idle warm VCL: only try-lock, then back off,
			 * to avoid deadlocking with vcl_BackendEvent. */
			if (! Lck_Trylock(&vcl_mtx))
				break;
			usleep(10 * 1000);
			continue;
		}
		Lck_Lock(&vcl_mtx);
		break;
	}
	Lck_AssertHeld(&vcl_mtx);
	temp = vcl->temp;
	if (temp != VCL_TEMP_COOLING)
		VTAILQ_INSERT_TAIL(&vcl->vdire->directors, vdir, directors_list);
	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_WARM);
	Lck_Unlock(&vcl_mtx);

	/* Re-check after the lock: undo if the VCL started cooling. */
	if (temp == VCL_TEMP_COOLING)
		return (vcldir_surplus(vdir));

	if (!temp->is_warm && temp != VCL_TEMP_INIT)
		WRONG("Dynamic Backends can only be added to warm VCLs");

	return (vdir->dir);
}
| 255 |
|
|
| 256 |
|
void |
| 257 |
52360 |
VRT_StaticDirector(VCL_BACKEND b) |
| 258 |
|
{ |
| 259 |
|
struct vcldir *vdir; |
| 260 |
|
|
| 261 |
52360 |
CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC); |
| 262 |
52360 |
vdir = b->vdir; |
| 263 |
52360 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
| 264 |
52360 |
assert(vdir->refcnt == 1); |
| 265 |
52360 |
AZ(vdir->flags & VDIR_FLG_NOREFCNT); |
| 266 |
52360 |
vdir->flags |= VDIR_FLG_NOREFCNT; |
| 267 |
52360 |
} |
| 268 |
|
|
| 269 |
|
// vcldir is already removed from the directors list
// to be called only from vdire_*
/*
 * Retire a director whose last reference is gone: send a COLD event if
 * the VCL is still warm, give the VMOD its destroy callback, then free.
 */
void
vcldir_retire(struct vcldir *vdir, const struct vcltemp *temp)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 0);
	AN(temp);

	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_COLD);
	/* destroy is optional for VMODs with no private state to drop. */
	if (vdir->methods->destroy != NULL)
		vdir->methods->destroy(vdir->dir);
	vcldir_free(vdir);
}
| 285 |
|
|
| 286 |
|
/*
 * Drop one reference under the director's lock; when the count reaches
 * zero, hand the director to the vdire machinery for retirement.
 * Returns the remaining reference count.
 */
static int
vcldir_deref(struct vcldir *vdir)
{
	int busy;

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	/* Refcount-exempt (static) directors must never get here. */
	AZ(vdir->flags & VDIR_FLG_NOREFCNT);

	Lck_Lock(&vdir->dlck);
	assert(vdir->refcnt > 0);
	busy = --vdir->refcnt;
	Lck_Unlock(&vdir->dlck);

	if (!busy)
		vdire_resign(vdir->vcl->vdire, vdir);
	return (busy);
}
| 303 |
|
|
| 304 |
|
/*
 * VMOD interface to delete a dynamic director.  Calls the optional
 * release callback, then drops the creation reference; *dirp is
 * consumed (NULLed).
 */
void
VRT_DelDirector(VCL_BACKEND *dirp)
{
	VCL_BACKEND dir;
	struct vcldir *vdir;

	TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);

	vdir = dir->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

	if (vdir->methods->release != NULL)
		vdir->methods->release(vdir->dir);

	if (vdir->flags & VDIR_FLG_NOREFCNT) {
		/* Static director: re-enable counting so the creation
		 * reference must be the last one, dropping it to zero. */
		vdir->flags &= ~VDIR_FLG_NOREFCNT;
		AZ(vcldir_deref(vdir));
	} else {
		(void) vcldir_deref(vdir);
	}
}
| 325 |
|
|
| 326 |
|
/*
 * Reference-counted backend assignment: *dst = src.  Takes a reference
 * on src and drops one on the previous *dst, except for directors
 * flagged VDIR_FLG_NOREFCNT, which are exempt from per-assignment
 * counting.  Either pointer may be NULL.
 */
void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
	struct vcldir *vdir;
	VCL_BACKEND tmp;

	AN(dst);
	CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
	CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
	if (*dst == src)
		return;
	tmp = *dst;
	*dst = src;
	/* Reference the incoming director first ... */
	if (src != NULL) {
		vdir = src->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
			Lck_Lock(&vdir->dlck);
			assert(vdir->refcnt > 0);
			vdir->refcnt++;
			Lck_Unlock(&vdir->dlck);
		}
	}
	/* ... then release the outgoing one. */
	if (tmp != NULL) {
		vdir = tmp->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT))
			(void)vcldir_deref(vdir);
	}
}
| 356 |
|
|
| 357 |
|
void |
| 358 |
3147 |
VRT_DisableDirector(VCL_BACKEND d) |
| 359 |
|
{ |
| 360 |
|
struct vcldir *vdir; |
| 361 |
|
|
| 362 |
3147 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
| 363 |
3147 |
vdir = d->vdir; |
| 364 |
3147 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
| 365 |
|
|
| 366 |
3147 |
vdir->admin_health = VDI_AH_DELETED; |
| 367 |
3147 |
vdir->health_changed = VTIM_real(); |
| 368 |
3147 |
} |
| 369 |
|
|
| 370 |
|
/*
 * Find a director by its vcl_name in the context's VCL; NULL when no
 * director matches.  Only legal from the CLI thread in a TASK_H
 * (vcl_init/vcl_fini) method.
 */
VCL_BACKEND
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
{
	struct vcl *vcl;
	struct vcldir *vdir;
	VCL_BACKEND dd, d = NULL;
	struct vdire *vdire;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(name);

	assert(ctx->method & VCL_MET_TASK_H);
	ASSERT_CLI();

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	vdire = vcl->vdire;

	/* Iteration over the director list is bracketed by the vdire
	 * iterator protocol to keep the list stable. */
	vdire_start_iter(vdire);
	VTAILQ_FOREACH(vdir, &vdire->directors, directors_list) {
		dd = vdir->dir;
		if (strcmp(dd->vcl_name, name))
			continue;
		d = dd;
		break;
	}
	vdire_end_iter(vdire);

	return (d);
}
| 401 |
|
|
| 402 |
|
/*--------------------------------------------------------------------*/ |
| 403 |
|
|
| 404 |
|
VCL_BACKEND |
| 405 |
150864 |
VCL_DefaultDirector(const struct vcl *vcl) |
| 406 |
|
{ |
| 407 |
|
|
| 408 |
150864 |
CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC); |
| 409 |
150864 |
CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC); |
| 410 |
150864 |
return (*vcl->conf->default_director); |
| 411 |
|
} |
| 412 |
|
|
| 413 |
|
const char * |
| 414 |
388137 |
VCL_Name(const struct vcl *vcl) |
| 415 |
|
{ |
| 416 |
|
|
| 417 |
388137 |
CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC); |
| 418 |
388137 |
return (vcl->loaded_name); |
| 419 |
|
} |
| 420 |
|
|
| 421 |
|
VCL_PROBE |
| 422 |
52640 |
VCL_DefaultProbe(const struct vcl *vcl) |
| 423 |
|
{ |
| 424 |
|
|
| 425 |
52640 |
CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC); |
| 426 |
52640 |
CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC); |
| 427 |
52640 |
return (vcl->conf->default_probe); |
| 428 |
|
} |
| 429 |
|
|
| 430 |
|
/*--------------------------------------------------------------------*/ |
| 431 |
|
|
| 432 |
|
/*
 * Assert the invariants of a VRT context: an output channel (either a
 * message vsb or a vsl log), a VCL, a sane workspace, and magic-checked
 * optional members for the client and backend sides.
 */
void
VRT_CTX_Assert(VRT_CTX)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	/* Output goes to either a message buffer or the shm log. */
	if (ctx->msg != NULL)
		CHECK_OBJ(ctx->msg, VSB_MAGIC);
	else
		AN(ctx->vsl);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
	WS_Assert(ctx->ws);

	CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

	/* Client-side members. */
	CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

	/* Backend-side members. */
	CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}
| 455 |
|
|
| 456 |
|
/*
 * Take a reference that keeps the VCL from going cold; pair with
 * VRT_VCL_Allow_Cold().  "desc" is a caller-supplied label kept on the
 * reference for diagnostics.
 */
struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
	struct vclref* ref;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = ctx->vcl;
	REPLACE(ref->desc, desc);

	/* Busy reference keeps the VCL warm. */
	VCL_Ref(ctx->vcl);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	return (ref);
}
| 477 |
|
|
| 478 |
|
/*
 * Release a reference obtained with VRT_VCL_Prevent_Cold().  *refp is
 * consumed and the vclref freed.
 */
void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	/* Drop the busy reference taken by VRT_VCL_Prevent_Cold(). */
	VCL_Rel(&vcl);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}
| 498 |
|
|
| 499 |
|
/*
 * Take a reference that keeps a warm VCL from being discarded; pair
 * with VRT_VCL_Allow_Discard().  CLI thread only; "desc" must be a
 * non-empty diagnostic label.
 */
struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
	struct vcl *vcl;
	struct vclref* ref;

	ASSERT_CLI();
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(desc);
	AN(*desc);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(vcl->temp->is_warm);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = vcl;
	REPLACE(ref->desc, desc);

	/* nrefs (not busy) is what blocks discarding. */
	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
	vcl->nrefs++;
	Lck_Unlock(&vcl_mtx);

	return (ref);
}
| 526 |
|
|
| 527 |
|
/*
 * Release a reference obtained with VRT_VCL_Prevent_Discard().  *refp
 * is consumed and the vclref freed.
 */
void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	/* NB: A VCL may be released by a VMOD at any time, but it must happen
	 * after a warmup and before the end of a cooldown. The release may or
	 * may not happen while the same thread holds the temperature lock, so
	 * instead we check that all references are gone in VCL_Nuke.
	 */

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	vcl->nrefs--;
	/* No garbage collection here, for the same reasons as in VCL_Rel. */
	Lck_Unlock(&vcl_mtx);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}
| 553 |
|
|
| 554 |
|
/*-------------------------------------------------------------------- |
| 555 |
|
*/ |
| 556 |
|
|
| 557 |
|
/*
 * Check whether the (top) request is still live before running a VCL
 * method.  Returns -1 and sets the handling to FAIL when the request
 * has been, or is now found to be, reset; 0 when processing may go on.
 */
static int
req_poll(struct worker *wrk, struct req *req)
{
	struct req *top;

	/* NB: Since a fail transition leads to vcl_synth, the request may be
	 * short-circuited twice.
	 */
	if (req->req_reset) {
		wrk->vpi->handling = VCL_RET_FAIL;
		return (-1);
	}

	top = req->top->topreq;
	CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

	/* Feature-gated; transports without a poll hook cannot detect
	 * client-side resets. */
	if (!FEATURE(FEATURE_VCL_REQ_RESET))
		return (0);
	if (top->transport->poll == NULL)
		return (0);
	if (top->transport->poll(top) >= 0)
		return (0);

	/* Client gone: timestamp it, count it, fail the task. */
	VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
	wrk->stats->req_reset++;
	wrk->vpi->handling = VCL_RET_FAIL;
	req->req_reset = 1;
	return (-1);
}
| 587 |
|
|
| 588 |
|
/*--------------------------------------------------------------------
 * Method functions to call into VCL programs.
 *
 * Either the request or busyobject must be specified, but not both.
 * The workspace argument is where random VCL stuff gets space from.
 */

static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
	uintptr_t rws = 0, aws;
	struct vrt_ctx ctx;
	struct vbitmap *vbm;
	void *p;
	size_t sz;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	if (bo != NULL) {
		CHECK_OBJ(bo, BUSYOBJ_MAGIC);
		CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
		VCL_Bo2Ctx(&ctx, bo);
	}
	if (req != NULL) {
		/* Both req and bo is only legal for vcl_pipe. */
		if (bo != NULL)
			assert(method == VCL_MET_PIPE);
		CHECK_OBJ(req, REQ_MAGIC);
		CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
		CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
		CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
		/* Bail out early if the client reset the request. */
		if (req_poll(wrk, req))
			return;
		VCL_Req2Ctx(&ctx, req);
	}
	assert(ctx.now != 0);
	ctx.specific = specific;
	ctx.method = method;
	if (track_call > 0) {
		/* Bitmap on the worker's aux workspace recording which
		 * subs have been entered (used to catch recursion in
		 * dynamic calls); rws marks where to roll it back. */
		rws = WS_Snapshot(wrk->aws);
		sz = VBITMAP_SZ(track_call);
		p = WS_Alloc(wrk->aws, sz);
		// No use to attempt graceful failure, all VCL calls will fail
		AN(p);
		vbm = vbit_init(p, sz);
		ctx.called = vbm;
	}
	aws = WS_Snapshot(wrk->aws);
	wrk->cur_method = method;
	wrk->seen_methods |= method;
	AN(ctx.vsl);
	VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
	func(&ctx, VSUB_STATIC, NULL);
	VSLbs(ctx.vsl, SLT_VCL_return,
	    TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
	wrk->cur_method |= 1;		// Magic marker
	if (wrk->vpi->handling == VCL_RET_FAIL)
		wrk->stats->vcl_fail++;

	/*
	 * VCL/Vmods are not allowed to make permanent allocations from
	 * wrk->aws, but they can reserve and return from it.
	 */
	assert(aws == WS_Snapshot(wrk->aws));
	if (rws != 0)
		WS_Reset(wrk->aws, rws);
}
| 655 |
|
|
| 656 |
|
/* Generate one VCL_<method>_method() entry point per VCL method (from
 * tbl/vcl_returns.h); each dispatches into the compiled VCL and then
 * asserts the resulting return action is legal for that method. */
#define VCL_MET_MAC(func, upper, typ, bitmap)				\
void									\
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,		\
    struct req *req, struct busyobj *bo, void *specific)		\
{									\
									\
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);				\
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);			\
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);				\
	vcl_call_method(wrk, req, bo, specific,				\
	    VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
	AN((1U << wrk->vpi->handling) & bitmap);			\
}

#include "tbl/vcl_returns.h"
| 671 |
|
|
| 672 |
|
/*-------------------------------------------------------------------- |
| 673 |
|
*/ |
| 674 |
|
|
| 675 |
|
/*
 * Dry-run a dynamic call: ask the compiled sub whether calling it from
 * the current context would be legal.  Returns NULL when OK, otherwise
 * an error string (allocated on ctx->ws, with a static fallback if the
 * workspace overflows).
 */
VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
	VCL_STRING err = NULL;
	enum vcl_func_fail_e fail;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

	/* VSUB_CHECK only reports via *fail, it does not execute. */
	AN(sub->func);
	sub->func(ctx, VSUB_CHECK, &fail);

	switch (fail) {
	case VSUB_E_OK:
		break;
	case VSUB_E_METHOD:
		err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
		    " not allowed from here", sub->name);
		if (err == NULL)
			err = "Dynamic call not allowed and workspace overflow";
		break;
	case VSUB_E_RECURSE:
		err = WS_Printf(ctx->ws, "Recursive dynamic call to"
		    " \"sub %s{}\"", sub->name);
		if (err == NULL)
			err = "Recursive dynamic call and workspace overflow";
		break;
	default:
		INCOMPL();
	}

	return (err);
}
| 708 |
|
|
| 709 |
|
VCL_VOID |
| 710 |
600 |
VRT_call(VRT_CTX, VCL_SUB sub) |
| 711 |
|
{ |
| 712 |
|
|
| 713 |
600 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
| 714 |
600 |
CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC); |
| 715 |
|
|
| 716 |
600 |
AZ(VRT_handled(ctx)); |
| 717 |
600 |
AN(sub->func); |
| 718 |
600 |
sub->func(ctx, VSUB_DYNAMIC, NULL); |
| 719 |
600 |
} |