| | varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2006 Verdens Gang AS |
2 |
|
* Copyright (c) 2006-2016 Varnish Software AS |
3 |
|
* All rights reserved. |
4 |
|
* |
5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
6 |
|
* |
7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
8 |
|
* |
9 |
|
* Redistribution and use in source and binary forms, with or without |
10 |
|
* modification, are permitted provided that the following conditions |
11 |
|
* are met: |
12 |
|
* 1. Redistributions of source code must retain the above copyright |
13 |
|
* notice, this list of conditions and the following disclaimer. |
14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
15 |
|
* notice, this list of conditions and the following disclaimer in the |
16 |
|
* documentation and/or other materials provided with the distribution. |
17 |
|
* |
18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 |
|
* SUCH DAMAGE. |
29 |
|
* |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include <stdio.h> |
35 |
|
#include <stdlib.h> |
36 |
|
|
37 |
|
#include "cache_varnishd.h" |
38 |
|
|
39 |
|
#include "vcl.h" |
40 |
|
#include "vtim.h" |
41 |
|
#include "vbm.h" |
42 |
|
|
43 |
|
#include "cache_director.h" |
44 |
|
#include "cache_transport.h" |
45 |
|
#include "cache_vcl.h" |
46 |
|
#include "vcc_interface.h" |
47 |
|
|
48 |
|
/*--------------------------------------------------------------------*/ |
49 |
|
|
50 |
|
/* Map a VCL_RET_* return code to its lower-case VCL keyword (e.g. "deliver"),
 * or NULL for an unknown code.  The cases are generated from the central
 * return table so new return actions are picked up automatically. */
const char *
VCL_Return_Name(unsigned r)
{

	switch (r) {
#define VCL_RET_MAC(l, U, B)	\
	case VCL_RET_##U:	\
		return(#l);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
63 |
|
|
64 |
|
/* Map a VCL_MET_* method bit to its upper-case name (e.g. "RECV"),
 * or NULL for an unknown value.  Generated from the same central table
 * as VCL_Return_Name(). */
const char *
VCL_Method_Name(unsigned m)
{

	switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)	\
	case VCL_MET_##upper:			\
		return (#upper);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
77 |
|
|
78 |
|
/*--------------------------------------------------------------------*/ |
79 |
|
|
80 |
|
void |
81 |
127935 |
VCL_Refresh(struct vcl **vcc) |
82 |
|
{ |
83 |
|
|
84 |
127935 |
while (vcl_active == NULL) |
85 |
0 |
(void)usleep(100000); |
86 |
|
|
87 |
127935 |
ASSERT_VCL_ACTIVE(); |
88 |
127927 |
if (*vcc == vcl_active) |
89 |
73656 |
return; |
90 |
|
|
91 |
54271 |
VCL_Update(vcc, NULL); |
92 |
127927 |
} |
93 |
|
|
94 |
|
void |
95 |
128128 |
VCL_Recache(const struct worker *wrk, struct vcl **vclp) |
96 |
|
{ |
97 |
|
|
98 |
128128 |
AN(wrk); |
99 |
128128 |
AN(vclp); |
100 |
128128 |
CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC); |
101 |
128128 |
ASSERT_VCL_ACTIVE(); |
102 |
|
|
103 |
128130 |
if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) { |
104 |
2246 |
VCL_Rel(vclp); |
105 |
2246 |
return; |
106 |
|
} |
107 |
125884 |
if (wrk->wpriv->vcl != NULL) |
108 |
0 |
VCL_Rel(&wrk->wpriv->vcl); |
109 |
125884 |
wrk->wpriv->vcl = *vclp; |
110 |
125884 |
*vclp = NULL; |
111 |
128130 |
} |
112 |
|
|
113 |
|
/* Acquire an additional busy reference on a VCL.  The VCL must already
 * be referenced (busy > 0) and must not be cold; taking the first
 * reference is handled elsewhere. */
void
VCL_Ref(struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(!vcl->temp->is_cold);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy++;
	Lck_Unlock(&vcl_mtx);
}
124 |
|
|
125 |
|
/* Drop a busy reference on *vcc and NULL the caller's pointer
 * (TAKE_OBJ_NOTNULL consumes it). */
void
VCL_Rel(struct vcl **vcc)
{
	struct vcl *vcl;

	TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy--;
	/*
	 * We do not garbage collect discarded VCL's here, that happens
	 * in VCL_Poll() which is called from the CLI thread.
	 */
	Lck_Unlock(&vcl_mtx);
}
140 |
|
|
141 |
|
/*--------------------------------------------------------------------*/ |
142 |
|
|
143 |
|
/* Final teardown of a director wrapper: must only be called once the
 * refcount has reached zero and the director has been removed from its
 * VCL's director_list. */
static void
vcldir_free(struct vcldir *vdir)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
	AZ(vdir->refcnt);
	Lck_Delete(&vdir->dlck);
	free(vdir->cli_name);
	FREE_OBJ(vdir->dir);
	FREE_OBJ(vdir);
}
155 |
|
|
156 |
|
/* Undo a freshly created director that turned out to be illegal because
 * the VCL went COOLING underneath us; returns NULL so callers can
 * "return (vcldir_surplus(vdir));" directly. */
static VCL_BACKEND
vcldir_surplus(struct vcldir *vdir)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 1);
	/* Drop the creation reference so vcldir_free()'s AZ() holds. */
	vdir->refcnt = 0;
	vcldir_free(vdir);
	return (NULL);
}
166 |
|
|
167 |
|
/* Create a dynamic director/backend for the VCL in ctx.
 *
 * The CLI name is "<vclname>.<fmt...>"; the part after the dot doubles
 * as the director's vcl_name.  Returns NULL when the VCL is COOLING
 * (creation is then quietly undone), and panics via WRONG() when called
 * on a VCL that is neither warm nor initializing. */
VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
	struct vsb *vsb;
	struct vcl *vcl;
	struct vcldir *vdir;
	const struct vcltemp *temp;
	va_list ap;
	int i;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
	AN(fmt);
	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	// opportunistic, re-checked again under lock
	if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
		return (NULL);

	ALLOC_OBJ(vdir, VCLDIR_MAGIC);
	AN(vdir);
	ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
	AN(vdir->dir);
	vdir->dir->vdir = vdir;

	vdir->methods = m;
	vdir->dir->priv = priv;
	/* Build "<vclname>.<formatted name>"; the offset i lets vcl_name
	 * point into the same allocation, after the dot. */
	vsb = VSB_new_auto();
	AN(vsb);
	VSB_printf(vsb, "%s.", VCL_Name(vcl));
	i = VSB_len(vsb);
	va_start(ap, fmt);
	VSB_vprintf(vsb, fmt, ap);
	va_end(ap);
	AZ(VSB_finish(vsb));
	REPLACE(vdir->cli_name, VSB_data(vsb));
	VSB_destroy(&vsb);
	vdir->dir->vcl_name = vdir->cli_name + i;

	vdir->vcl = vcl;
	vdir->admin_health = VDI_AH_AUTO;
	vdir->health_changed = VTIM_real();

	/* Creation reference, dropped by VRT_DelDirector(). */
	vdir->refcnt++;
	Lck_New(&vdir->dlck, lck_director);
	vdir->dir->mtx = &vdir->dlck;

	/* NB: at this point we look at the VCL temperature after getting
	 * through the trouble of creating the director even though it might
	 * not be legal to do so. Because we change the VCL temperature before
	 * sending COLD events we have to tolerate and undo attempts for the
	 * COOLING case.
	 *
	 * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
	 * if the vcl is busy (ref vcl_set_state())
	 */

	while (1) {
		temp = vcl->temp;
		if (temp == VCL_TEMP_COOLING)
			return (vcldir_surplus(vdir));
		if (vcl->busy == 0 && vcl->temp->is_warm) {
			/* Idle warm VCL: only try-lock, backing off to
			 * avoid deadlocking against vcl_BackendEvent. */
			if (! Lck_Trylock(&vcl_mtx))
				break;
			usleep(10 * 1000);
			continue;
		}
		Lck_Lock(&vcl_mtx);
		break;
	}
	Lck_AssertHeld(&vcl_mtx);
	temp = vcl->temp;
	if (temp != VCL_TEMP_COOLING)
		VTAILQ_INSERT_TAIL(&vcl->director_list, vdir, list);
	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_WARM);
	Lck_Unlock(&vcl_mtx);

	if (temp == VCL_TEMP_COOLING)
		return (vcldir_surplus(vdir));

	if (!temp->is_warm && temp != VCL_TEMP_INIT)
		WRONG("Dynamic Backends can only be added to warm VCLs");

	return (vdir->dir);
}
255 |
|
|
256 |
|
/* Mark a director as statically owned by its VCL: reference counting is
 * disabled until VRT_DelDirector() re-enables it for the final deref.
 * Must be called while the creation reference is the only one. */
void
VRT_StaticDirector(VCL_BACKEND b)
{
	struct vcldir *vdir;

	CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
	vdir = b->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 1);
	AZ(vdir->flags & VDIR_FLG_NOREFCNT);
	vdir->flags |= VDIR_FLG_NOREFCNT;
}
268 |
|
|
269 |
|
/* Remove a director whose refcount dropped to zero from its VCL's list,
 * send it a COLD event if the VCL is still warm, let the vmod destroy
 * its private state, and free the wrapper. */
static void
vcldir_retire(struct vcldir *vdir)
{
	const struct vcltemp *temp;

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 0);
	CHECK_OBJ_NOTNULL(vdir->vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	/* Sample the temperature under the lock so the COLD decision is
	 * consistent with the list removal. */
	temp = vdir->vcl->temp;
	VTAILQ_REMOVE(&vdir->vcl->director_list, vdir, list);
	Lck_Unlock(&vcl_mtx);

	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_COLD);
	if (vdir->methods->destroy != NULL)
		vdir->methods->destroy(vdir->dir);
	vcldir_free(vdir);
}
289 |
|
|
290 |
|
static int |
291 |
12658 |
vcldir_deref(struct vcldir *vdir) |
292 |
|
{ |
293 |
|
int busy; |
294 |
|
|
295 |
12658 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
296 |
12658 |
AZ(vdir->flags & VDIR_FLG_NOREFCNT); |
297 |
|
|
298 |
12658 |
Lck_Lock(&vdir->dlck); |
299 |
12658 |
assert(vdir->refcnt > 0); |
300 |
12658 |
busy = --vdir->refcnt; |
301 |
12658 |
Lck_Unlock(&vdir->dlck); |
302 |
|
|
303 |
12658 |
if (!busy) |
304 |
4341 |
vcldir_retire(vdir); |
305 |
12658 |
return (busy); |
306 |
|
} |
307 |
|
|
308 |
|
void |
309 |
3781 |
VRT_DelDirector(VCL_BACKEND *dirp) |
310 |
|
{ |
311 |
|
VCL_BACKEND dir; |
312 |
|
struct vcldir *vdir; |
313 |
|
|
314 |
3781 |
TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC); |
315 |
|
|
316 |
3781 |
vdir = dir->vdir; |
317 |
3781 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
318 |
|
|
319 |
3781 |
if (vdir->methods->release != NULL) |
320 |
520 |
vdir->methods->release(vdir->dir); |
321 |
|
|
322 |
3781 |
if (vdir->flags & VDIR_FLG_NOREFCNT) { |
323 |
2584 |
vdir->flags &= ~VDIR_FLG_NOREFCNT; |
324 |
2584 |
AZ(vcldir_deref(vdir)); |
325 |
2584 |
} else { |
326 |
1197 |
(void) vcldir_deref(vdir); |
327 |
|
} |
328 |
3781 |
} |
329 |
|
|
330 |
|
/* Reference-counted pointer assignment for VCL_BACKEND variables:
 * drops the old *dst reference, takes one on src, then stores src.
 * Directors flagged NOREFCNT (static ones) skip the counting entirely.
 * Either side may be NULL. */
void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
	struct vcldir *vdir;

	AN(dst);
	CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
	CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
	if (*dst != NULL) {
		vdir = (*dst)->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT))
			(void)vcldir_deref(vdir);
	}
	if (src != NULL) {
		vdir = src->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
			Lck_Lock(&vdir->dlck);
			assert(vdir->refcnt > 0);
			vdir->refcnt++;
			Lck_Unlock(&vdir->dlck);
		}
	}
	*dst = src;
}
356 |
|
|
357 |
|
void |
358 |
3144 |
VRT_DisableDirector(VCL_BACKEND d) |
359 |
|
{ |
360 |
|
struct vcldir *vdir; |
361 |
|
|
362 |
3144 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
363 |
3144 |
vdir = d->vdir; |
364 |
3144 |
CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC); |
365 |
|
|
366 |
3144 |
vdir->admin_health = VDI_AH_DELETED; |
367 |
3144 |
vdir->health_changed = VTIM_real(); |
368 |
3144 |
} |
369 |
|
|
370 |
|
VCL_BACKEND |
371 |
40 |
VRT_LookupDirector(VRT_CTX, VCL_STRING name) |
372 |
|
{ |
373 |
|
struct vcl *vcl; |
374 |
|
struct vcldir *vdir; |
375 |
40 |
VCL_BACKEND dd, d = NULL; |
376 |
|
|
377 |
40 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
378 |
40 |
AN(name); |
379 |
|
|
380 |
40 |
assert(ctx->method & VCL_MET_TASK_H); |
381 |
40 |
ASSERT_CLI(); |
382 |
|
|
383 |
40 |
vcl = ctx->vcl; |
384 |
40 |
CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC); |
385 |
|
|
386 |
40 |
Lck_Lock(&vcl_mtx); |
387 |
40 |
VTAILQ_FOREACH(vdir, &vcl->director_list, list) { |
388 |
40 |
dd = vdir->dir; |
389 |
40 |
if (strcmp(dd->vcl_name, name)) |
390 |
0 |
continue; |
391 |
40 |
d = dd; |
392 |
40 |
break; |
393 |
|
} |
394 |
40 |
Lck_Unlock(&vcl_mtx); |
395 |
|
|
396 |
40 |
return (d); |
397 |
|
} |
398 |
|
|
399 |
|
/*--------------------------------------------------------------------*/ |
400 |
|
|
401 |
|
/* Return the VCL's default director (the compiled-in default backend). */
VCL_BACKEND
VCL_DefaultDirector(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
	return (*vcl->conf->default_director);
}
409 |
|
|
410 |
|
/* Return the name the VCL was loaded under (as given to vcl.load). */
const char *
VCL_Name(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	return (vcl->loaded_name);
}
417 |
|
|
418 |
|
/* Return the VCL's default health probe definition. */
VCL_PROBE
VCL_DefaultProbe(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
	return (vcl->conf->default_probe);
}
426 |
|
|
427 |
|
/*--------------------------------------------------------------------*/ |
428 |
|
|
429 |
|
/* Sanity-check the invariants of a VRT context: exactly one of msg/vsl
 * is the logging channel, the VCL and workspace are valid, and all the
 * optional session/request/busyobj members are either NULL or sound. */
void
VRT_CTX_Assert(VRT_CTX)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	if (ctx->msg != NULL)
		CHECK_OBJ(ctx->msg, VSB_MAGIC);
	else
		AN(ctx->vsl);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
	WS_Assert(ctx->ws);

	CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

	CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

	CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}
452 |
|
|
453 |
|
/* Take a busy reference on the ctx's VCL so it cannot go cold, and
 * register it on the VCL's ref_list under the given description.
 * Paired with VRT_VCL_Allow_Cold(); the returned handle is owned by
 * the caller. */
struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
	struct vclref* ref;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = ctx->vcl;
	REPLACE(ref->desc, desc);

	VCL_Ref(ctx->vcl);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	return (ref);
}
474 |
|
|
475 |
|
/* Release a reference taken with VRT_VCL_Prevent_Cold(): unlink it from
 * the VCL's ref_list, drop the busy reference, free the handle and NULL
 * the caller's pointer. */
void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	VCL_Rel(&vcl);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}
495 |
|
|
496 |
|
/* Take a named reference (nrefs) on the ctx's VCL so it cannot be
 * discarded.  CLI thread only, and the VCL must be warm.  Paired with
 * VRT_VCL_Allow_Discard(); the returned handle is owned by the caller. */
struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
	struct vcl *vcl;
	struct vclref* ref;

	ASSERT_CLI();
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(desc);
	AN(*desc);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(vcl->temp->is_warm);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = vcl;
	REPLACE(ref->desc, desc);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
	vcl->nrefs++;
	Lck_Unlock(&vcl_mtx);

	return (ref);
}
523 |
|
|
524 |
|
/* Release a reference taken with VRT_VCL_Prevent_Discard(): unlink it,
 * decrement nrefs, free the handle and NULL the caller's pointer. */
void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	/* NB: A VCL may be released by a VMOD at any time, but it must happen
	 * after a warmup and before the end of a cooldown. The release may or
	 * may not happen while the same thread holds the temperature lock, so
	 * instead we check that all references are gone in VCL_Nuke.
	 */

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	vcl->nrefs--;
	/* No garbage collection here, for the same reasons as in VCL_Rel. */
	Lck_Unlock(&vcl_mtx);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}
550 |
|
|
551 |
|
/*-------------------------------------------------------------------- |
552 |
|
*/ |
553 |
|
|
554 |
|
/* Decide whether a request should be short-circuited to VCL_RET_FAIL
 * because the client reset it (e.g. HTTP/2 RST_STREAM).  Returns -1 and
 * sets handling=FAIL when the request is (or becomes) reset, 0 when VCL
 * processing should continue.  Polling is gated on the vcl_req_reset
 * feature flag and on the transport providing a poll callback. */
static int
req_poll(struct worker *wrk, struct req *req)
{
	struct req *top;

	/* NB: Since a fail transition leads to vcl_synth, the request may be
	 * short-circuited twice.
	 */
	if (req->req_reset) {
		wrk->vpi->handling = VCL_RET_FAIL;
		return (-1);
	}

	top = req->top->topreq;
	CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

	if (!FEATURE(FEATURE_VCL_REQ_RESET))
		return (0);
	if (top->transport->poll == NULL)
		return (0);
	if (top->transport->poll(top) >= 0)
		return (0);

	/* Transport reports the stream is gone: mark and count the reset. */
	VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
	wrk->stats->req_reset++;
	wrk->vpi->handling = VCL_RET_FAIL;
	req->req_reset = 1;
	return (-1);
}
584 |
|
|
585 |
|
/*-------------------------------------------------------------------- |
586 |
|
* Method functions to call into VCL programs. |
587 |
|
* |
588 |
|
* Either the request or busyobject must be specified, but not both. |
589 |
|
* The workspace argument is where random VCL stuff gets space from. |
590 |
|
*/ |
591 |
|
|
592 |
|
/*
 * Call one VCL method function.
 *
 * Either req or bo must be given (both only for vcl_pipe).  The vrt_ctx
 * is built on the stack from whichever is present.  track_call > 0
 * requests a "called subs" bitmap (for vcl_init/fini sub tracking),
 * allocated from the worker's aux workspace and rolled back afterwards.
 * The function logs VCL_call/VCL_return, and asserts that the method
 * made no permanent aux-workspace allocation.
 */
static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
	uintptr_t rws = 0, aws;
	struct vrt_ctx ctx;
	struct vbitmap *vbm;
	void *p;
	size_t sz;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	if (bo != NULL) {
		CHECK_OBJ(bo, BUSYOBJ_MAGIC);
		CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
		VCL_Bo2Ctx(&ctx, bo);
	}
	if (req != NULL) {
		/* Both req and bo only happens for vcl_pipe. */
		if (bo != NULL)
			assert(method == VCL_MET_PIPE);
		CHECK_OBJ(req, REQ_MAGIC);
		CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
		CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
		CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
		/* Reset requests skip VCL entirely (handling = FAIL). */
		if (req_poll(wrk, req))
			return;
		VCL_Req2Ctx(&ctx, req);
	}
	assert(ctx.now != 0);
	ctx.specific = specific;
	ctx.method = method;
	if (track_call > 0) {
		rws = WS_Snapshot(wrk->aws);
		sz = VBITMAP_SZ(track_call);
		p = WS_Alloc(wrk->aws, sz);
		// No use to attempt graceful failure, all VCL calls will fail
		AN(p);
		vbm = vbit_init(p, sz);
		ctx.called = vbm;
	}
	aws = WS_Snapshot(wrk->aws);
	wrk->cur_method = method;
	wrk->seen_methods |= method;
	AN(ctx.vsl);
	VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
	func(&ctx, VSUB_STATIC, NULL);
	VSLbs(ctx.vsl, SLT_VCL_return,
	    TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
	wrk->cur_method |= 1;		// Magic marker
	if (wrk->vpi->handling == VCL_RET_FAIL)
		wrk->stats->vcl_fail++;

	/*
	 * VCL/Vmods are not allowed to make permanent allocations from
	 * wrk->aws, but they can reserve and return from it.
	 */
	assert(aws == WS_Snapshot(wrk->aws));
	if (rws != 0)
		WS_Reset(wrk->aws, rws);
}
652 |
|
|
653 |
|
/* Instantiate a VCL_<method>_method() entry point for every VCL method
 * via the central table: each wrapper validates its arguments, dispatches
 * to vcl_call_method() and asserts the resulting return action is one the
 * method's bitmap permits. */
#define VCL_MET_MAC(func, upper, typ, bitmap)				\
void									\
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,		\
    struct req *req, struct busyobj *bo, void *specific)		\
{									\
									\
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);				\
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);			\
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);				\
	vcl_call_method(wrk, req, bo, specific,				\
	    VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
	AN((1U << wrk->vpi->handling) & bitmap);			\
}

#include "tbl/vcl_returns.h"
668 |
|
|
669 |
|
/*-------------------------------------------------------------------- |
670 |
|
*/ |
671 |
|
|
672 |
|
/* Probe whether a dynamic call to sub would be legal from the current
 * context without actually running it (VSUB_CHECK).  Returns NULL when
 * the call is allowed, otherwise an error string allocated from the
 * ctx workspace (with a static fallback on workspace overflow). */
VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
	VCL_STRING err = NULL;
	enum vcl_func_fail_e fail;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

	AN(sub->func);
	sub->func(ctx, VSUB_CHECK, &fail);

	switch (fail) {
	case VSUB_E_OK:
		break;
	case VSUB_E_METHOD:
		err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
		    " not allowed from here", sub->name);
		if (err == NULL)
			err = "Dynamic call not allowed and workspace overflow";
		break;
	case VSUB_E_RECURSE:
		err = WS_Printf(ctx->ws, "Recursive dynamic call to"
		    " \"sub %s{}\"", sub->name);
		if (err == NULL)
			err = "Recursive dynamic call and workspace overflow";
		break;
	default:
		INCOMPL();
	}

	return (err);
}
705 |
|
|
706 |
|
/* Dynamically call a VCL sub (VSUB_DYNAMIC).  The caller must not have
 * a return action pending yet (AZ(VRT_handled())); legality should have
 * been established via VRT_check_call() or the compiler. */
VCL_VOID
VRT_call(VRT_CTX, VCL_SUB sub)
{

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

	AZ(VRT_handled(ctx));
	AN(sub->func);
	sub->func(ctx, VSUB_DYNAMIC, NULL);
}