varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "vcl.h"
#include "vtim.h"
#include "vbm.h"

#include "cache_director.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "vcc_interface.h"

/*--------------------------------------------------------------------*/

const char *
VCL_Return_Name(unsigned r)
{

        switch (r) {
#define VCL_RET_MAC(l, U, B)    \
        case VCL_RET_##U:       \
                return(#l);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}

const char *
VCL_Method_Name(unsigned m)
{

        switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)   \
        case VCL_MET_##upper:                   \
                return (#upper);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}
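
/*
 * Implementation note: both lookup functions above are generated from the
 * central table in tbl/vcl_returns.h via X-macros.  As an illustrative
 * sketch (not the literal table contents), an entry along the lines of
 *
 *      VCL_RET_MAC(deliver, DELIVER, ...)
 *
 * expands inside VCL_Return_Name() to
 *
 *      case VCL_RET_DELIVER:
 *              return("deliver");
 *
 * so adding a return action to the table extends both name lookups without
 * further changes here.
 */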

/*--------------------------------------------------------------------*/

void
VCL_Refresh(struct vcl **vcc)
{

        while (vcl_active == NULL)
                (void)usleep(100000);

        ASSERT_VCL_ACTIVE();
        if (*vcc == vcl_active)
                return;

        VCL_Update(vcc, NULL);
}

void
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
{

        AN(wrk);
        AN(vclp);
        CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
        ASSERT_VCL_ACTIVE();

        if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
                VCL_Rel(vclp);
                return;
        }
        if (wrk->wpriv->vcl != NULL)
                VCL_Rel(&wrk->wpriv->vcl);
        wrk->wpriv->vcl = *vclp;
        *vclp = NULL;
}
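
/*
 * Usage sketch (illustrative, not part of this file): VCL_Refresh() and
 * VCL_Recache() implement a per-worker cache of the active VCL.  A caller
 * typically takes a reference before dispatching work and hands it back
 * afterwards, roughly:
 *
 *      struct vcl *vcl = NULL;
 *
 *      VCL_Refresh(&vcl);              // reference the currently active VCL
 *      ... run the task under this VCL ...
 *      VCL_Recache(wrk, &vcl);         // park it on wrk->wpriv->vcl or release
 *
 * VCL_Recache() only keeps the reference if it still points at vcl_active
 * and the worker is not already caching it; otherwise it calls VCL_Rel().
 */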

void
VCL_Ref(struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(!vcl->temp->is_cold);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy++;
        Lck_Unlock(&vcl_mtx);
}

void
VCL_Rel(struct vcl **vcc)
{
        struct vcl *vcl;

        TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy--;
        /*
         * We do not garbage collect discarded VCLs here; that happens
         * in VCL_Poll(), which is called from the CLI thread.
         */
        Lck_Unlock(&vcl_mtx);
}
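
/*
 * Reference discipline, in brief: VCL_Ref()/VCL_Rel() bump and drop the
 * per-VCL busy counter so a warm VCL cannot go cold while a task holds it,
 * e.g. (sketch):
 *
 *      VCL_Ref(vcl);           // requires a non-cold VCL, vcl->busy++
 *      ... use vcl ...
 *      VCL_Rel(&vcl);          // vcl->busy--, pointer is cleared
 *
 * Dropping the last reference frees nothing here; discarded VCLs are
 * collected later by VCL_Poll() on the CLI thread.
 */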

/*--------------------------------------------------------------------*/

static void
vcldir_free(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
        AZ(vdir->refcnt);
        Lck_Delete(&vdir->dlck);
        free(vdir->cli_name);
        FREE_OBJ(vdir->dir);
        FREE_OBJ(vdir);
}

static VCL_BACKEND
vcldir_surplus(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        vdir->refcnt = 0;
        vcldir_free(vdir);
        return (NULL);
}

VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
        struct vsb *vsb;
        struct vcl *vcl;
        struct vcldir *vdir;
        const struct vcltemp *temp;
        va_list ap;
        int i;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
        AN(fmt);
        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        // opportunistic, re-checked again under lock
        if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
                return (NULL);

        ALLOC_OBJ(vdir, VCLDIR_MAGIC);
        AN(vdir);
        ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
        AN(vdir->dir);
        vdir->dir->vdir = vdir;

        vdir->methods = m;
        vdir->dir->priv = priv;
        vsb = VSB_new_auto();
        AN(vsb);
        VSB_printf(vsb, "%s.", VCL_Name(vcl));
        i = VSB_len(vsb);
        va_start(ap, fmt);
        VSB_vprintf(vsb, fmt, ap);
        va_end(ap);
        AZ(VSB_finish(vsb));
        REPLACE(vdir->cli_name, VSB_data(vsb));
        VSB_destroy(&vsb);
        vdir->dir->vcl_name = vdir->cli_name + i;

        vdir->vcl = vcl;
        vdir->admin_health = VDI_AH_AUTO;
        vdir->health_changed = VTIM_real();

        vdir->refcnt++;
        Lck_New(&vdir->dlck, lck_director);
        vdir->dir->mtx = &vdir->dlck;

        /* NB: at this point we look at the VCL temperature after getting
         * through the trouble of creating the director even though it might
         * not be legal to do so. Because we change the VCL temperature before
         * sending COLD events, we have to tolerate and undo attempts for the
         * COOLING case.
         *
         * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
         * if the vcl is busy (ref vcl_set_state())
         */

        while (1) {
                temp = vcl->temp;
                if (temp == VCL_TEMP_COOLING)
                        return (vcldir_surplus(vdir));
                if (vcl->busy == 0 && vcl->temp->is_warm) {
                        if (! Lck_Trylock(&vcl_mtx))
                                break;
                        usleep(10 * 1000);
                        continue;
                }
                Lck_Lock(&vcl_mtx);
                break;
        }
        Lck_AssertHeld(&vcl_mtx);
        temp = vcl->temp;
        if (temp != VCL_TEMP_COOLING)
                VTAILQ_INSERT_TAIL(&vcl->vdire->directors, vdir, directors_list);
        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_WARM);
        Lck_Unlock(&vcl_mtx);

        if (temp == VCL_TEMP_COOLING)
                return (vcldir_surplus(vdir));

        if (!temp->is_warm && temp != VCL_TEMP_INIT)
                WRONG("Dynamic Backends can only be added to warm VCLs");

        return (vdir->dir);
}
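
/*
 * Calling convention sketch (hypothetical VMOD code, not part of this
 * file): a dynamic director is created by passing a vdi_methods table, a
 * private pointer and a printf-style name, e.g.
 *
 *      static const struct vdi_methods my_methods[1] = {{
 *              .magic =        VDI_METHODS_MAGIC,
 *              .type =         "example",
 *              .gethdrs =      my_gethdrs,     // hypothetical callbacks
 *              .destroy =      my_destroy,
 *      }};
 *
 *      d = VRT_AddDirector(ctx, my_methods, my_priv, "%s", "my_name");
 *      if (d == NULL)
 *              ...                     // the VCL was cooling, creation refused
 *
 * The returned director carries one reference owned by its creator, to be
 * returned eventually with VRT_DelDirector().
 */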

void
VRT_StaticDirector(VCL_BACKEND b)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
        vdir = b->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
        vdir->flags |= VDIR_FLG_NOREFCNT;
}
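
/*
 * Note: VRT_StaticDirector() exempts a freshly created director (refcnt
 * must still be 1) from per-assignment reference counting; backends whose
 * lifetime is bounded by the VCL itself (notably VCL-declared backends)
 * use this to avoid the locking overhead.  VRT_DelDirector() clears the
 * flag again so the final reference can still be dropped.
 */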

// vcldir is already removed from the directors list
// to be called only from vdire_*
void
vcldir_retire(struct vcldir *vdir, const struct vcltemp *temp)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 0);
        AN(temp);

        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_COLD);
        if (vdir->methods->destroy != NULL)
                vdir->methods->destroy(vdir->dir);
        vcldir_free(vdir);
}

static int
vcldir_deref(struct vcldir *vdir)
{
        int busy;

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);

        Lck_Lock(&vdir->dlck);
        assert(vdir->refcnt > 0);
        busy = --vdir->refcnt;
        Lck_Unlock(&vdir->dlck);

        if (!busy)
                vdire_resign(vdir->vcl->vdire, vdir);
        return (busy);
}

void
VRT_DelDirector(VCL_BACKEND *dirp)
{
        VCL_BACKEND dir;
        struct vcldir *vdir;

        TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);

        vdir = dir->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        if (vdir->methods->release != NULL)
                vdir->methods->release(vdir->dir);

        if (vdir->flags & VDIR_FLG_NOREFCNT) {
                vdir->flags &= ~VDIR_FLG_NOREFCNT;
                AZ(vcldir_deref(vdir));
        } else {
                (void) vcldir_deref(vdir);
        }
}
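
/*
 * Deletion sketch: VRT_DelDirector() first lets the owner stop handing out
 * the director (the optional release() callback), then drops the creation
 * reference.  The destroy() callback and the actual free happen later in
 * vcldir_retire(), once the last reference is gone and the director has
 * been taken off the VCL's list.  A VMOD's cleanup code would simply do
 * (hypothetical):
 *
 *      VRT_DelDirector(&my_state->dir);
 */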

void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
        struct vcldir *vdir;
        VCL_BACKEND tmp;

        AN(dst);
        CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
        CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
        if (*dst == src)
                return;
        tmp = *dst;
        *dst = src;
        if (src != NULL) {
                vdir = src->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
                        Lck_Lock(&vdir->dlck);
                        assert(vdir->refcnt > 0);
                        vdir->refcnt++;
                        Lck_Unlock(&vdir->dlck);
                }
        }
        if (tmp != NULL) {
                vdir = tmp->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT))
                        (void)vcldir_deref(vdir);
        }
}
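
/*
 * Assignment sketch (illustrative, not part of this file): director-valued
 * variables are updated through VRT_Assign_Backend() so reference counts
 * stay balanced:
 *
 *      VCL_BACKEND be = NULL;
 *
 *      VRT_Assign_Backend(&be, d);     // takes a reference on d (unless NOREFCNT)
 *      ...
 *      VRT_Assign_Backend(&be, NULL);  // drops that reference again
 *
 * Assigning the value a variable already holds is a no-op.
 */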

void
VRT_DisableDirector(VCL_BACKEND d)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        vdir = d->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        vdir->admin_health = VDI_AH_DELETED;
        vdir->health_changed = VTIM_real();
}

VCL_BACKEND
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
{
        struct vcl *vcl;
        struct vcldir *vdir;
        VCL_BACKEND dd, d = NULL;
        struct vdire *vdire;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(name);

        assert(ctx->method & VCL_MET_TASK_H);
        ASSERT_CLI();

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        vdire = vcl->vdire;

        vdire_start_iter(vdire);
        VTAILQ_FOREACH(vdir, &vdire->directors, directors_list) {
                dd = vdir->dir;
                if (strcmp(dd->vcl_name, name))
                        continue;
                d = dd;
                break;
        }
        vdire_end_iter(vdire);

        return (d);
}
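
/*
 * Note: VRT_LookupDirector() is restricted to CLI-driven contexts
 * (vcl_init/vcl_fini, hence the VCL_MET_TASK_H assertion) and performs a
 * linear scan of the VCL's director list by vcl_name.  A VMOD might use it
 * as (sketch, "be1" being a hypothetical backend name):
 *
 *      d = VRT_LookupDirector(ctx, "be1");
 *      if (d == NULL)
 *              VRT_fail(ctx, "no backend named be1");
 */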

/*--------------------------------------------------------------------*/

VCL_BACKEND
VCL_DefaultDirector(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (*vcl->conf->default_director);
}

const char *
VCL_Name(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        return (vcl->loaded_name);
}

VCL_PROBE
VCL_DefaultProbe(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (vcl->conf->default_probe);
}

/*--------------------------------------------------------------------*/

void
VRT_CTX_Assert(VRT_CTX)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

        if (ctx->msg != NULL)
                CHECK_OBJ(ctx->msg, VSB_MAGIC);
        else
                AN(ctx->vsl);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
        WS_Assert(ctx->ws);

        CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

        CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}

struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
        struct vclref* ref;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = ctx->vcl;
        REPLACE(ref->desc, desc);

        VCL_Ref(ctx->vcl);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        VCL_Rel(&vcl);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}
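
/*
 * Usage sketch (hypothetical VMOD code): a VMOD that cannot tolerate cold
 * events, for instance because a background thread keeps using this VCL,
 * pins the temperature for as long as necessary:
 *
 *      ref = VRT_VCL_Prevent_Cold(ctx, "mymod");       // "mymod" is a label
 *      ... background work referencing the VCL ...
 *      VRT_VCL_Allow_Cold(&ref);
 *
 * Internally this reuses the busy counter of VCL_Ref()/VCL_Rel(), plus a
 * named entry on the VCL's ref_list.
 */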

struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
        struct vcl *vcl;
        struct vclref* ref;

        ASSERT_CLI();
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(desc);
        AN(*desc);

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(vcl->temp->is_warm);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = vcl;
        REPLACE(ref->desc, desc);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
        vcl->nrefs++;
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        /* NB: A VCL may be released by a VMOD at any time, but it must happen
         * after a warmup and before the end of a cooldown. The release may or
         * may not happen while the same thread holds the temperature lock, so
         * instead we check that all references are gone in VCL_Nuke.
         */

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        vcl->nrefs--;
        /* No garbage collection here, for the same reasons as in VCL_Rel. */
        Lck_Unlock(&vcl_mtx);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}
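
/*
 * Usage sketch (hypothetical VMOD code): unlike the _Cold pair above, this
 * pair only pins the VCL against being discarded (vcl->nrefs) and may only
 * be taken while the VCL is warm:
 *
 *      ref = VRT_VCL_Prevent_Discard(ctx, "mymod object");
 *      ...
 *      VRT_VCL_Allow_Discard(&ref);
 *
 * The check that all such references are gone again happens in VCL_Nuke().
 */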

/*--------------------------------------------------------------------
 */

static int
req_poll(struct worker *wrk, struct req *req)
{
        struct req *top;

        /* NB: Since a fail transition leads to vcl_synth, the request may be
         * short-circuited twice.
         */
        if (req->req_reset) {
                wrk->vpi->handling = VCL_RET_FAIL;
                return (-1);
        }

        top = req->top->topreq;
        CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

        if (!FEATURE(FEATURE_VCL_REQ_RESET))
                return (0);
        if (top->transport->poll == NULL)
                return (0);
        if (top->transport->poll(top) >= 0)
                return (0);

        VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
        wrk->stats->req_reset++;
        wrk->vpi->handling = VCL_RET_FAIL;
        req->req_reset = 1;
        return (-1);
}
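
/*
 * Note: req_poll() gives the transport a chance to report that the client
 * is already gone (for example a reset stream) before time is spent in VCL.
 * When the vcl_req_reset feature is enabled and the transport's poll()
 * callback reports failure, the task is failed with VCL_RET_FAIL, counted
 * in req_reset, and req->req_reset makes the second short-circuit via
 * vcl_synth cheap.
 */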

/*--------------------------------------------------------------------
 * Method functions to call into VCL programs.
 *
 * Either the request or busyobject must be specified, but not both.
 * The workspace argument is where random VCL stuff gets space from.
 */

static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
        uintptr_t rws = 0, aws;
        struct vrt_ctx ctx;
        struct vbitmap *vbm;
        void *p;
        size_t sz;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        INIT_OBJ(&ctx, VRT_CTX_MAGIC);
        if (bo != NULL) {
                CHECK_OBJ(bo, BUSYOBJ_MAGIC);
                CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
                VCL_Bo2Ctx(&ctx, bo);
        }
        if (req != NULL) {
                if (bo != NULL)
                        assert(method == VCL_MET_PIPE);
                CHECK_OBJ(req, REQ_MAGIC);
                CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
                CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
                CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
                if (req_poll(wrk, req))
                        return;
                VCL_Req2Ctx(&ctx, req);
        }
        assert(ctx.now != 0);
        ctx.specific = specific;
        ctx.method = method;
        if (track_call > 0) {
                rws = WS_Snapshot(wrk->aws);
                sz = VBITMAP_SZ(track_call);
                p = WS_Alloc(wrk->aws, sz);
                // No use to attempt graceful failure, all VCL calls will fail
                AN(p);
                vbm = vbit_init(p, sz);
                ctx.called = vbm;
        }
        aws = WS_Snapshot(wrk->aws);
        wrk->cur_method = method;
        wrk->seen_methods |= method;
        AN(ctx.vsl);
        VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
        func(&ctx, VSUB_STATIC, NULL);
        VSLbs(ctx.vsl, SLT_VCL_return,
            TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
        wrk->cur_method |= 1;           // Magic marker
        if (wrk->vpi->handling == VCL_RET_FAIL)
                wrk->stats->vcl_fail++;

        /*
         * VCL/Vmods are not allowed to make permanent allocations from
         * wrk->aws, but they can reserve and return from it.
         */
        assert(aws == WS_Snapshot(wrk->aws));
        if (rws != 0)
                WS_Reset(wrk->aws, rws);
}

#define VCL_MET_MAC(func, upper, typ, bitmap)                           \
void                                                                    \
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,                \
     struct req *req, struct busyobj *bo, void *specific)               \
{                                                                       \
                                                                        \
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);                              \
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);                   \
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);                           \
        vcl_call_method(wrk, req, bo, specific,                         \
            VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
        AN((1U << wrk->vpi->handling) & bitmap);                        \
}

#include "tbl/vcl_returns.h"
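
/*
 * Illustrative expansion (sketch, not generated text): for the table entry
 * describing vcl_recv, the macro above produces roughly
 *
 *      void
 *      VCL_recv_method(struct vcl *vcl, struct worker *wrk,
 *           struct req *req, struct busyobj *bo, void *specific)
 *      {
 *              ...
 *              vcl_call_method(wrk, req, bo, specific,
 *                  VCL_MET_RECV, vcl->conf->recv_func, vcl->conf->nsub);
 *              AN((1U << wrk->vpi->handling) & <bitmap of legal returns>);
 *      }
 *
 * i.e. one strongly typed entry point per VCL method, each asserting that
 * the VCL program ended with a return action legal for that method.
 */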

/*--------------------------------------------------------------------
 */

VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
        VCL_STRING err = NULL;
        enum vcl_func_fail_e fail;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AN(sub->func);
        sub->func(ctx, VSUB_CHECK, &fail);

        switch (fail) {
        case VSUB_E_OK:
                break;
        case VSUB_E_METHOD:
                err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
                    " not allowed from here", sub->name);
                if (err == NULL)
                        err = "Dynamic call not allowed and workspace overflow";
                break;
        case VSUB_E_RECURSE:
                err = WS_Printf(ctx->ws, "Recursive dynamic call to"
                    " \"sub %s{}\"", sub->name);
                if (err == NULL)
                        err = "Recursive dynamic call and workspace overflow";
                break;
        default:
                INCOMPL();
        }

        return (err);
}

VCL_VOID
VRT_call(VRT_CTX, VCL_SUB sub)
{

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AZ(VRT_handled(ctx));
        AN(sub->func);
        sub->func(ctx, VSUB_DYNAMIC, NULL);
}
}