varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2016 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 *
30
 */
31
32
#include "config.h"
33
34
#include <stdio.h>
35
#include <stdlib.h>
36
37
#include "cache_varnishd.h"
38
39
#include "vcl.h"
40
#include "vtim.h"
41
#include "vbm.h"
42
43
#include "cache_director.h"
44
#include "cache_transport.h"
45
#include "cache_vcl.h"
46
#include "vcc_interface.h"
47
48
/*--------------------------------------------------------------------*/
49
50
/*
 * Map a VCL_RET_* code to its lower-case VCL name ("deliver", "fail", ...).
 * Returns NULL for codes not present in tbl/vcl_returns.h.
 */
const char *
VCL_Return_Name(unsigned r)
{

	switch (r) {
#define VCL_RET_MAC(l, U, B)	\
	case VCL_RET_##U:	\
		return (#l);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
63
64
/*
 * Map a VCL_MET_* bit to its upper-case method name ("RECV", "DELIVER", ...).
 * Returns NULL if the value is not a known method bit.
 */
const char *
VCL_Method_Name(unsigned m)
{

	switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)	\
	case VCL_MET_##upper:			\
		return (#upper);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}
77
78
/*--------------------------------------------------------------------*/
79
80
void
81 3309
VCL_Refresh(struct vcl **vcc)
82
{
83
84 3309
        while (vcl_active == NULL)
85 0
                VTIM_sleep(0.1);
86
87 3309
        ASSERT_VCL_ACTIVE();
88 3309
        if (*vcc == vcl_active)
89 1908
                return;
90
91 1401
        VCL_Update(vcc, NULL);
92 3309
}
93
94
void
95 3312
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
96
{
97
98 3312
        AN(wrk);
99 3312
        AN(vclp);
100 3312
        CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
101 3312
        ASSERT_VCL_ACTIVE();
102
103 3312
        if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
104 56
                VCL_Rel(vclp);
105 56
                return;
106
        }
107 3256
        if (wrk->wpriv->vcl != NULL)
108 0
                VCL_Rel(&wrk->wpriv->vcl);
109 3256
        wrk->wpriv->vcl = *vclp;
110 3256
        *vclp = NULL;
111 3312
}
112
113
void
114 2648
VCL_Ref(struct vcl *vcl)
115
{
116
117 2648
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
118 2648
        assert(!vcl->temp->is_cold);
119 2648
        Lck_Lock(&vcl_mtx);
120 2648
        assert(vcl->busy > 0);
121 2648
        vcl->busy++;
122 2648
        Lck_Unlock(&vcl_mtx);
123 2648
}
124
125
void
126 2962
VCL_Rel(struct vcl **vcc)
127
{
128
        struct vcl *vcl;
129
130 2962
        TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
131 2962
        Lck_Lock(&vcl_mtx);
132 2962
        assert(vcl->busy > 0);
133 2962
        vcl->busy--;
134
        /*
135
         * We do not garbage collect discarded VCL's here, that happens
136
         * in VCL_Poll() which is called from the CLI thread.
137
         */
138 2962
        Lck_Unlock(&vcl_mtx);
139 2962
}
140
141
/*--------------------------------------------------------------------*/
142
143
static void
144 108
vcldir_free(struct vcldir *vdir)
145
{
146
147 108
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
148 108
        CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
149 108
        AZ(vdir->refcnt);
150 108
        Lck_Delete(&vdir->dlck);
151 108
        free(vdir->cli_name);
152 108
        FREE_OBJ(vdir->dir);
153 108
        FREE_OBJ(vdir);
154 108
}
155
156
static VCL_BACKEND
157 0
vcldir_surplus(struct vcldir *vdir)
158
{
159
160 0
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
161 0
        assert(vdir->refcnt == 1);
162 0
        vdir->refcnt = 0;
163 0
        vcldir_free(vdir);
164 0
        return (NULL);
165
}
166
167
VCL_BACKEND
168 1476
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
169
    const char *fmt, ...)
170
{
171
        struct vsb *vsb;
172
        struct vcl *vcl;
173
        struct vcldir *vdir;
174
        const struct vcltemp *temp;
175
        va_list ap;
176
        int i;
177
178 1476
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
179 1476
        CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
180 1476
        AN(fmt);
181 1476
        vcl = ctx->vcl;
182 1476
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
183
184
        // opportunistic, re-checked again under lock
185 1476
        if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
186 0
                return (NULL);
187
188 1476
        ALLOC_OBJ(vdir, VCLDIR_MAGIC);
189 1476
        AN(vdir);
190 1476
        ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
191 1476
        AN(vdir->dir);
192 1476
        vdir->dir->vdir = vdir;
193
194 1476
        vdir->methods = m;
195 1476
        vdir->dir->priv = priv;
196 1476
        vsb = VSB_new_auto();
197 1476
        AN(vsb);
198 1476
        VSB_printf(vsb, "%s.", VCL_Name(vcl));
199 1476
        i = VSB_len(vsb);
200 1476
        va_start(ap, fmt);
201 1476
        VSB_vprintf(vsb, fmt, ap);
202 1476
        va_end(ap);
203 1476
        AZ(VSB_finish(vsb));
204 1476
        REPLACE(vdir->cli_name, VSB_data(vsb));
205 1476
        VSB_destroy(&vsb);
206 1476
        vdir->dir->vcl_name = vdir->cli_name + i;
207
208 1476
        vdir->vcl = vcl;
209 1476
        vdir->admin_health = VDI_AH_AUTO;
210 1476
        vdir->health_changed = VTIM_real();
211
212 1476
        vdir->refcnt++;
213 1476
        Lck_New(&vdir->dlck, lck_director);
214 1476
        vdir->dir->mtx = &vdir->dlck;
215
216
        /* NB: at this point we look at the VCL temperature after getting
217
         * through the trouble of creating the director even though it might
218
         * not be legal to do so. Because we change the VCL temperature before
219
         * sending COLD events we have to tolerate and undo attempts for the
220
         * COOLING case.
221
         *
222
         * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
223
         * if the vcl is busy (ref vcl_set_state())
224
         */
225
226 1476
        while (1) {
227 1476
                temp = vcl->temp;
228 1476
                if (temp == VCL_TEMP_COOLING)
229 0
                        return (vcldir_surplus(vdir));
230 1476
                if (vcl->busy == 0 && vcl->temp->is_warm) {
231 93
                        if (! Lck_Trylock(&vcl_mtx))
232 93
                                break;
233 0
                        usleep(10 * 1000);
234 0
                        continue;
235
                }
236 1383
                Lck_Lock(&vcl_mtx);
237 1383
                break;
238
        }
239 1476
        Lck_AssertHeld(&vcl_mtx);
240 1476
        temp = vcl->temp;
241 1476
        if (temp != VCL_TEMP_COOLING)
242 1476
                VTAILQ_INSERT_TAIL(&vcl->vdire->directors, vdir, directors_list);
243 1476
        if (temp->is_warm)
244 107
                VDI_Event(vdir->dir, VCL_EVENT_WARM);
245 1476
        Lck_Unlock(&vcl_mtx);
246
247 1476
        if (temp == VCL_TEMP_COOLING)
248 0
                return (vcldir_surplus(vdir));
249
250 1476
        if (!temp->is_warm && temp != VCL_TEMP_INIT)
251 1
                WRONG("Dynamic Backends can only be added to warm VCLs");
252
253 1475
        return (vdir->dir);
254 1475
}
255
256
void
257 1289
VRT_StaticDirector(VCL_BACKEND b)
258
{
259
        struct vcldir *vdir;
260
261 1289
        CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
262 1289
        vdir = b->vdir;
263 1289
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
264 1289
        assert(vdir->refcnt == 1);
265 1289
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
266 1289
        vdir->flags |= VDIR_FLG_NOREFCNT;
267 1289
}
268
269
// vcldir is already removed from the directors list
270
// to be called only from vdire_*
271
void
272 108
vcldir_retire(struct vcldir *vdir, const struct vcltemp *temp)
273
{
274
275 108
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
276 108
        assert(vdir->refcnt == 0);
277 108
        AN(temp);
278
279 108
        if (temp->is_warm)
280 14
                VDI_Event(vdir->dir, VCL_EVENT_COLD);
281 108
        if (vdir->methods->destroy != NULL)
282 93
                vdir->methods->destroy(vdir->dir);
283 108
        vcldir_free(vdir);
284 108
}
285
286
static int
287 316
vcldir_deref(struct vcldir *vdir)
288
{
289
        int busy;
290
291 316
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
292 316
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
293
294 316
        Lck_Lock(&vdir->dlck);
295 316
        assert(vdir->refcnt > 0);
296 316
        busy = --vdir->refcnt;
297 316
        Lck_Unlock(&vdir->dlck);
298
299 316
        if (!busy)
300 108
                vdire_resign(vdir->vcl->vdire, vdir);
301 316
        return (busy);
302
}
303
304
void
305 94
VRT_DelDirector(VCL_BACKEND *dirp)
306
{
307
        VCL_BACKEND dir;
308
        struct vcldir *vdir;
309
310 94
        TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);
311
312 94
        vdir = dir->vdir;
313 94
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
314
315 94
        if (vdir->methods->release != NULL)
316 13
                vdir->methods->release(vdir->dir);
317
318 94
        if (vdir->flags & VDIR_FLG_NOREFCNT) {
319 64
                vdir->flags &= ~VDIR_FLG_NOREFCNT;
320 64
                AZ(vcldir_deref(vdir));
321 64
        } else {
322 30
                (void) vcldir_deref(vdir);
323
        }
324 94
}
325
326
void
327 17216
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
328
{
329
        struct vcldir *vdir;
330
        VCL_BACKEND tmp;
331
332 17216
        AN(dst);
333 17216
        CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
334 17216
        CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
335 17216
        if (*dst == src)
336 1178
                return;
337 16038
        tmp = *dst;
338 16038
        *dst = src;
339 16038
        if (src != NULL) {
340 8159
                vdir = src->vdir;
341 8159
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
342 8159
                if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
343 213
                        Lck_Lock(&vdir->dlck);
344 213
                        assert(vdir->refcnt > 0);
345 213
                        vdir->refcnt++;
346 213
                        Lck_Unlock(&vdir->dlck);
347 213
                }
348 8159
        }
349 16038
        if (tmp != NULL) {
350 8099
                vdir = tmp->vdir;
351 8099
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
352 8099
                if (!(vdir->flags & VDIR_FLG_NOREFCNT))
353 222
                        (void)vcldir_deref(vdir);
354 8099
        }
355 17216
}
356
357
void
358 78
VRT_DisableDirector(VCL_BACKEND d)
359
{
360
        struct vcldir *vdir;
361
362 78
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
363 78
        vdir = d->vdir;
364 78
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
365
366 78
        vdir->admin_health = VDI_AH_DELETED;
367 78
        vdir->health_changed = VTIM_real();
368 78
}
369
370
VCL_BACKEND
371 1
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
372
{
373
        struct vcl *vcl;
374
        struct vcldir *vdir;
375 1
        VCL_BACKEND dd, d = NULL;
376
        struct vdire *vdire;
377
378 1
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
379 1
        AN(name);
380
381 1
        assert(ctx->method & VCL_MET_TASK_H);
382 1
        ASSERT_CLI();
383
384 1
        vcl = ctx->vcl;
385 1
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
386
387 1
        vdire = vcl->vdire;
388
389 1
        vdire_start_iter(vdire);
390 1
        VTAILQ_FOREACH(vdir, &vdire->directors, directors_list) {
391 1
                dd = vdir->dir;
392 1
                if (strcmp(dd->vcl_name, name))
393 0
                        continue;
394 1
                d = dd;
395 1
                break;
396
        }
397 1
        vdire_end_iter(vdire);
398
399 1
        return (d);
400
}
401
402
/*--------------------------------------------------------------------*/
403
404
VCL_BACKEND
405 3681
VCL_DefaultDirector(const struct vcl *vcl)
406
{
407
408 3681
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
409 3681
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
410 3681
        return (*vcl->conf->default_director);
411
}
412
413
const char *
414 9515
VCL_Name(const struct vcl *vcl)
415
{
416
417 9515
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
418 9515
        return (vcl->loaded_name);
419
}
420
421
VCL_PROBE
422 1296
VCL_DefaultProbe(const struct vcl *vcl)
423
{
424
425 1296
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
426 1296
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
427 1296
        return (vcl->conf->default_probe);
428
}
429
430
/*--------------------------------------------------------------------*/
431
432
void
433 337
VRT_CTX_Assert(VRT_CTX)
434
{
435 337
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
436
437 337
        if (ctx->msg != NULL)
438 22
                CHECK_OBJ(ctx->msg, VSB_MAGIC);
439
        else
440 315
                AN(ctx->vsl);
441 337
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
442 337
        WS_Assert(ctx->ws);
443
444 337
        CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);
445
446 337
        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
447 337
        CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
448 337
        CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
449 337
        CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);
450
451 337
        CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
452 337
        CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
453 337
        CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
454 337
}
455
456
struct vclref *
457 1
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
458
{
459
        struct vclref* ref;
460
461 1
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
462 1
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
463
464 1
        ALLOC_OBJ(ref, VCLREF_MAGIC);
465 1
        AN(ref);
466 1
        ref->vcl = ctx->vcl;
467 1
        REPLACE(ref->desc, desc);
468
469 1
        VCL_Ref(ctx->vcl);
470
471 1
        Lck_Lock(&vcl_mtx);
472 1
        VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
473 1
        Lck_Unlock(&vcl_mtx);
474
475 1
        return (ref);
476
}
477
478
void
479 1
VRT_VCL_Allow_Cold(struct vclref **refp)
480
{
481
        struct vcl *vcl;
482
        struct vclref *ref;
483
484 1
        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
485 1
        vcl = ref->vcl;
486 1
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
487
488 1
        Lck_Lock(&vcl_mtx);
489 1
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
490 1
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
491 1
        Lck_Unlock(&vcl_mtx);
492
493 1
        VCL_Rel(&vcl);
494
495 1
        REPLACE(ref->desc, NULL);
496 1
        FREE_OBJ(ref);
497 1
}
498
499
struct vclref *
500 92
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
501
{
502
        struct vcl *vcl;
503
        struct vclref* ref;
504
505 92
        ASSERT_CLI();
506 92
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
507 92
        AN(desc);
508 92
        AN(*desc);
509
510 92
        vcl = ctx->vcl;
511 92
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
512 92
        assert(vcl->temp->is_warm);
513
514 92
        ALLOC_OBJ(ref, VCLREF_MAGIC);
515 92
        AN(ref);
516 92
        ref->vcl = vcl;
517 92
        REPLACE(ref->desc, desc);
518
519 92
        Lck_Lock(&vcl_mtx);
520 92
        VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
521 92
        vcl->nrefs++;
522 92
        Lck_Unlock(&vcl_mtx);
523
524 92
        return (ref);
525
}
526
527
void
528 14
VRT_VCL_Allow_Discard(struct vclref **refp)
529
{
530
        struct vcl *vcl;
531
        struct vclref *ref;
532
533 14
        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
534 14
        vcl = ref->vcl;
535 14
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
536
537
        /* NB: A VCL may be released by a VMOD at any time, but it must happen
538
         * after a warmup and before the end of a cooldown. The release may or
539
         * may not happen while the same thread holds the temperature lock, so
540
         * instead we check that all references are gone in VCL_Nuke.
541
         */
542
543 14
        Lck_Lock(&vcl_mtx);
544 14
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
545 14
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
546 14
        vcl->nrefs--;
547
        /* No garbage collection here, for the same reasons as in VCL_Rel. */
548 14
        Lck_Unlock(&vcl_mtx);
549
550 14
        REPLACE(ref->desc, NULL);
551 14
        FREE_OBJ(ref);
552 14
}
553
554
/*--------------------------------------------------------------------
555
 */
556
557
static int
558 14532
req_poll(struct worker *wrk, struct req *req)
559
{
560
        struct req *top;
561
562
        /* NB: Since a fail transition leads to vcl_synth, the request may be
563
         * short-circuited twice.
564
         */
565 14532
        if (req->req_reset) {
566 23
                wrk->vpi->handling = VCL_RET_FAIL;
567 23
                return (-1);
568
        }
569
570 14509
        top = req->top->topreq;
571 14509
        CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
572 14509
        CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);
573
574 14509
        if (!FEATURE(FEATURE_VCL_REQ_RESET))
575 20
                return (0);
576 14489
        if (top->transport->poll == NULL)
577 14027
                return (0);
578 462
        if (top->transport->poll(top) >= 0)
579 439
                return (0);
580
581 23
        VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
582 23
        wrk->stats->req_reset++;
583 23
        wrk->vpi->handling = VCL_RET_FAIL;
584 23
        req->req_reset = 1;
585 23
        return (-1);
586 14532
}
587
588
/*--------------------------------------------------------------------
589
 * Method functions to call into VCL programs.
590
 *
591
 * Either the request or busyobject must be specified, but not both.
592
 * The workspace argument is where random VCL stuff gets space from.
593
 */
594
595
static void
596 19197
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
597
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
598
{
599 19197
        uintptr_t rws = 0, aws;
600
        struct vrt_ctx ctx;
601
        struct vbitmap *vbm;
602
        void *p;
603
        size_t sz;
604
605 19197
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
606 19197
        INIT_OBJ(&ctx, VRT_CTX_MAGIC);
607 19197
        if (bo != NULL) {
608 4702
                CHECK_OBJ(bo, BUSYOBJ_MAGIC);
609 4702
                CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
610 4702
                VCL_Bo2Ctx(&ctx, bo);
611 4702
        }
612 19197
        if (req != NULL) {
613 14532
                if (bo != NULL)
614 30
                        assert(method == VCL_MET_PIPE);
615 14532
                CHECK_OBJ(req, REQ_MAGIC);
616 14532
                CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
617 14532
                CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
618 14532
                CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
619 14532
                if (req_poll(wrk, req))
620 46
                        return;
621 14486
                VCL_Req2Ctx(&ctx, req);
622 14486
        }
623 19151
        assert(ctx.now != 0);
624 19151
        ctx.specific = specific;
625 19151
        ctx.method = method;
626 19151
        if (track_call > 0) {
627 41
                rws = WS_Snapshot(wrk->aws);
628 41
                sz = VBITMAP_SZ(track_call);
629 41
                p = WS_Alloc(wrk->aws, sz);
630
                // No use to attempt graceful failure, all VCL calls will fail
631 41
                AN(p);
632 41
                vbm = vbit_init(p, sz);
633 41
                ctx.called = vbm;
634 41
        }
635 19151
        aws = WS_Snapshot(wrk->aws);
636 19151
        wrk->cur_method = method;
637 19151
        wrk->seen_methods |= method;
638 19151
        AN(ctx.vsl);
639 19151
        VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
640 19151
        func(&ctx, VSUB_STATIC, NULL);
641 38302
        VSLbs(ctx.vsl, SLT_VCL_return,
642 19151
            TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
643 19151
        wrk->cur_method |= 1;           // Magic marker
644 19151
        if (wrk->vpi->handling == VCL_RET_FAIL)
645 145
                wrk->stats->vcl_fail++;
646
647
        /*
648
         * VCL/Vmods are not allowed to make permanent allocations from
649
         * wrk->aws, but they can reserve and return from it.
650
         */
651 19151
        assert(aws == WS_Snapshot(wrk->aws));
652 19151
        if (rws != 0)
653 41
                WS_Reset(wrk->aws, rws);
654 19197
}
655
656
/*
 * Generate one VCL_<method>_method() wrapper per VCL method, each of
 * which funnels into vcl_call_method() and then asserts that the
 * returned handling is legal for that method (per its bitmap).
 */
#define VCL_MET_MAC(func, upper, typ, bitmap)				\
void									\
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,		\
     struct req *req, struct busyobj *bo, void *specific)		\
{									\
									\
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);				\
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);			\
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);				\
	vcl_call_method(wrk, req, bo, specific,				\
	    VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
	AN((1U << wrk->vpi->handling) & bitmap);			\
}

#include "tbl/vcl_returns.h"
671
672
/*--------------------------------------------------------------------
673
 */
674
675
VCL_STRING
676 1
VRT_check_call(VRT_CTX, VCL_SUB sub)
677
{
678 1
        VCL_STRING err = NULL;
679
        enum vcl_func_fail_e fail;
680
681 1
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
682 1
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);
683
684 1
        AN(sub->func);
685 1
        sub->func(ctx, VSUB_CHECK, &fail);
686
687 1
        switch (fail) {
688
        case VSUB_E_OK:
689 0
                break;
690
        case VSUB_E_METHOD:
691 2
                err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
692 1
                    " not allowed from here", sub->name);
693 1
                if (err == NULL)
694 0
                        err = "Dynamic call not allowed and workspace overflow";
695 1
                break;
696
        case VSUB_E_RECURSE:
697 0
                err = WS_Printf(ctx->ws, "Recursive dynamic call to"
698 0
                    " \"sub %s{}\"", sub->name);
699 0
                if (err == NULL)
700 0
                        err = "Recursive dynamic call and workspace overflow";
701 0
                break;
702
        default:
703 0
                INCOMPL();
704 0
        }
705
706 1
        return (err);
707
}
708
709
VCL_VOID
710 14
VRT_call(VRT_CTX, VCL_SUB sub)
711
{
712
713 14
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
714 14
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);
715
716 14
        AZ(VRT_handled(ctx));
717 14
        AN(sub->func);
718 14
        sub->func(ctx, VSUB_DYNAMIC, NULL);
719 14
}