varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "vcl.h"
#include "vtim.h"
#include "vbm.h"

#include "cache_director.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "vcc_interface.h"

/*--------------------------------------------------------------------*/

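/*
 * Map VCL return actions and methods to their symbolic names, via the
 * generated tables in tbl/vcl_returns.h.
 */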
const char *
VCL_Return_Name(unsigned r)
{

	switch (r) {
#define VCL_RET_MAC(l, U, B)	\
	case VCL_RET_##U:	\
		return(#l);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}

const char *
VCL_Method_Name(unsigned m)
{

	switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)	\
	case VCL_MET_##upper:			\
		return (#upper);
#include "tbl/vcl_returns.h"
	default:
		return (NULL);
	}
}

/*--------------------------------------------------------------------*/

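/*
 * VCL_Refresh() points *vcc at the currently active VCL, waiting for one
 * to appear if necessary. VCL_Recache() moves a per-task VCL reference
 * into the worker's private cache, or releases it if it is stale or the
 * worker already caches the active VCL.
 */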
void
VCL_Refresh(struct vcl **vcc)
{

	while (vcl_active == NULL)
		(void)usleep(100000);

	ASSERT_VCL_ACTIVE();
	if (*vcc == vcl_active)
		return;

	VCL_Update(vcc, NULL);
}

void
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
{

	AN(wrk);
	AN(vclp);
	CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
	ASSERT_VCL_ACTIVE();

	if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
		VCL_Rel(vclp);
		return;
	}
	if (wrk->wpriv->vcl != NULL)
		VCL_Rel(&wrk->wpriv->vcl);
	wrk->wpriv->vcl = *vclp;
	*vclp = NULL;
}

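/*
 * VCL_Ref() and VCL_Rel() adjust the per-VCL busy counter under vcl_mtx;
 * a reference may only be taken on a VCL that is not cold.
 */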
void
VCL_Ref(struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(!vcl->temp->is_cold);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy++;
	Lck_Unlock(&vcl_mtx);
}

void
VCL_Rel(struct vcl **vcc)
{
	struct vcl *vcl;

	TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
	Lck_Lock(&vcl_mtx);
	assert(vcl->busy > 0);
	vcl->busy--;
	/*
	 * We do not garbage collect discarded VCL's here, that happens
	 * in VCL_Poll() which is called from the CLI thread.
	 */
	Lck_Unlock(&vcl_mtx);
}

/*--------------------------------------------------------------------*/

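/*
 * vcldir_free() releases a director's memory; vcldir_surplus() undoes the
 * creation of a director that turned out to be built while its VCL was
 * cooling down.
 */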
static void
vcldir_free(struct vcldir *vdir)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
	AZ(vdir->refcnt);
	Lck_Delete(&vdir->dlck);
	free(vdir->cli_name);
	FREE_OBJ(vdir->dir);
	FREE_OBJ(vdir);
}

static VCL_BACKEND
vcldir_surplus(struct vcldir *vdir)
{

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 1);
	vdir->refcnt = 0;
	vcldir_free(vdir);
	return (NULL);
}

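/*
 * Create a dynamic director. The director gets a CLI name of the form
 * "<vcl>.<name>", where <name> is built from the printf-style arguments.
 * NULL is returned if the VCL is in the process of cooling down.
 */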
VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
	struct vsb *vsb;
	struct vcl *vcl;
	struct vcldir *vdir;
	const struct vcltemp *temp;
	va_list ap;
	int i;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
	AN(fmt);
	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	// opportunistic, re-checked again under lock
	if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
		return (NULL);

	ALLOC_OBJ(vdir, VCLDIR_MAGIC);
	AN(vdir);
	ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
	AN(vdir->dir);
	vdir->dir->vdir = vdir;

	vdir->methods = m;
	vdir->dir->priv = priv;
	vsb = VSB_new_auto();
	AN(vsb);
	VSB_printf(vsb, "%s.", VCL_Name(vcl));
	i = VSB_len(vsb);
	va_start(ap, fmt);
	VSB_vprintf(vsb, fmt, ap);
	va_end(ap);
	AZ(VSB_finish(vsb));
	REPLACE(vdir->cli_name, VSB_data(vsb));
	VSB_destroy(&vsb);
	vdir->dir->vcl_name = vdir->cli_name + i;

	vdir->vcl = vcl;
	vdir->admin_health = VDI_AH_AUTO;
	vdir->health_changed = VTIM_real();

	vdir->refcnt++;
	Lck_New(&vdir->dlck, lck_director);
	vdir->dir->mtx = &vdir->dlck;

	/* NB: at this point we look at the VCL temperature after getting
	 * through the trouble of creating the director even though it might
	 * not be legal to do so. Because we change the VCL temperature before
	 * sending COLD events we have to tolerate and undo attempts for the
	 * COOLING case.
	 *
	 * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
	 * if the vcl is busy (ref vcl_set_state())
	 */

	while (1) {
		temp = vcl->temp;
		if (temp == VCL_TEMP_COOLING)
			return (vcldir_surplus(vdir));
		if (vcl->busy == 0 && vcl->temp->is_warm) {
			if (! Lck_Trylock(&vcl_mtx))
				break;
			usleep(10 * 1000);
			continue;
		}
		Lck_Lock(&vcl_mtx);
		break;
	}
	Lck_AssertHeld(&vcl_mtx);
	temp = vcl->temp;
	if (temp != VCL_TEMP_COOLING)
		VTAILQ_INSERT_TAIL(&vcl->director_list, vdir, list);
	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_WARM);
	Lck_Unlock(&vcl_mtx);

	if (temp == VCL_TEMP_COOLING)
		return (vcldir_surplus(vdir));

	if (!temp->is_warm && temp != VCL_TEMP_INIT)
		WRONG("Dynamic Backends can only be added to warm VCLs");

	return (vdir->dir);
}

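/*
 * Exempt a freshly created director from reference counting; its single
 * reference is dropped explicitly by VRT_DelDirector().
 */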
void
VRT_StaticDirector(VCL_BACKEND b)
{
	struct vcldir *vdir;

	CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
	vdir = b->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 1);
	AZ(vdir->flags & VDIR_FLG_NOREFCNT);
	vdir->flags |= VDIR_FLG_NOREFCNT;
}

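/*
 * vcldir_deref() drops one reference. The last reference retires the
 * director: it is removed from its VCL's director list, receives a COLD
 * event if the VCL is warm, and has its destroy callback invoked before
 * being freed.
 */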
static void
vcldir_retire(struct vcldir *vdir)
{
	const struct vcltemp *temp;

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	assert(vdir->refcnt == 0);
	CHECK_OBJ_NOTNULL(vdir->vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	temp = vdir->vcl->temp;
	VTAILQ_REMOVE(&vdir->vcl->director_list, vdir, list);
	Lck_Unlock(&vcl_mtx);

	if (temp->is_warm)
		VDI_Event(vdir->dir, VCL_EVENT_COLD);
	if (vdir->methods->destroy != NULL)
		vdir->methods->destroy(vdir->dir);
	vcldir_free(vdir);
}

static int
vcldir_deref(struct vcldir *vdir)
{
	int busy;

	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	AZ(vdir->flags & VDIR_FLG_NOREFCNT);

	Lck_Lock(&vdir->dlck);
	assert(vdir->refcnt > 0);
	busy = --vdir->refcnt;
	Lck_Unlock(&vdir->dlck);

	if (!busy)
		vcldir_retire(vdir);
	return (busy);
}

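/*
 * Delete a dynamic director: call its release method, then drop the
 * creation reference, clearing VDIR_FLG_NOREFCNT first if it was set.
 */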
void
VRT_DelDirector(VCL_BACKEND *dirp)
{
	VCL_BACKEND dir;
	struct vcldir *vdir;

	TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);

	vdir = dir->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

	if (vdir->methods->release != NULL)
		vdir->methods->release(vdir->dir);

	if (vdir->flags & VDIR_FLG_NOREFCNT) {
		vdir->flags &= ~VDIR_FLG_NOREFCNT;
		AZ(vcldir_deref(vdir));
	} else {
		(void) vcldir_deref(vdir);
	}
}

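/*
 * Reference-counted assignment of a director variable: the old value, if
 * any, is dereferenced and the new value, if any, gains a reference,
 * except for directors flagged as not reference-counted.
 */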
void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
	struct vcldir *vdir;

	AN(dst);
	CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
	CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
	if (*dst != NULL) {
		vdir = (*dst)->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT))
			(void)vcldir_deref(vdir);
	}
	if (src != NULL) {
		vdir = src->vdir;
		CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
		if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
			Lck_Lock(&vdir->dlck);
			assert(vdir->refcnt > 0);
			vdir->refcnt++;
			Lck_Unlock(&vdir->dlck);
		}
	}
	*dst = src;
}

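/*
 * Mark a director as administratively deleted and record when its health
 * status changed.
 */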
void
VRT_DisableDirector(VCL_BACKEND d)
{
	struct vcldir *vdir;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	vdir = d->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

	vdir->admin_health = VDI_AH_DELETED;
	vdir->health_changed = VTIM_real();
}

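/*
 * Look a director up by its vcl_name in the VCL's director list; only
 * valid on the CLI thread during a VCL_MET_TASK_H method
 * (vcl_init{}/vcl_fini{}).
 */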
VCL_BACKEND
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
{
	struct vcl *vcl;
	struct vcldir *vdir;
	VCL_BACKEND dd, d = NULL;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(name);

	assert(ctx->method & VCL_MET_TASK_H);
	ASSERT_CLI();

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	VTAILQ_FOREACH(vdir, &vcl->director_list, list) {
		dd = vdir->dir;
		if (strcmp(dd->vcl_name, name))
			continue;
		d = dd;
		break;
	}
	Lck_Unlock(&vcl_mtx);

	return (d);
}

/*--------------------------------------------------------------------*/

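/*
 * Accessors for a compiled VCL: its default director, its loaded name
 * and its default probe.
 */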
VCL_BACKEND
VCL_DefaultDirector(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
	return (*vcl->conf->default_director);
}

const char *
VCL_Name(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	return (vcl->loaded_name);
}

VCL_PROBE
VCL_DefaultProbe(const struct vcl *vcl)
{

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
	return (vcl->conf->default_probe);
}

/*--------------------------------------------------------------------*/

void
VRT_CTX_Assert(VRT_CTX)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	if (ctx->msg != NULL)
		CHECK_OBJ(ctx->msg, VSB_MAGIC);
	else
		AN(ctx->vsl);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
	WS_Assert(ctx->ws);

	CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

	CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

	CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
	CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}

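/*
 * VRT_VCL_Prevent_Cold() takes a reference that keeps the VCL warm;
 * VRT_VCL_Allow_Cold() returns it.
 */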
struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
	struct vclref* ref;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = ctx->vcl;
	REPLACE(ref->desc, desc);

	VCL_Ref(ctx->vcl);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	return (ref);
}

void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	Lck_Unlock(&vcl_mtx);

	VCL_Rel(&vcl);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}

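/*
 * VRT_VCL_Prevent_Discard() takes a reference that keeps a warm VCL from
 * being discarded; VRT_VCL_Allow_Discard() returns it.
 */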
struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
	struct vcl *vcl;
	struct vclref* ref;

	ASSERT_CLI();
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(desc);
	AN(*desc);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(vcl->temp->is_warm);

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = vcl;
	REPLACE(ref->desc, desc);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
	vcl->nrefs++;
	Lck_Unlock(&vcl_mtx);

	return (ref);
}

void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
	vcl = ref->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

	/* NB: A VCL may be released by a VMOD at any time, but it must happen
	 * after a warmup and before the end of a cooldown. The release may or
	 * may not happen while the same thread holds the temperature lock, so
	 * instead we check that all references are gone in VCL_Nuke.
	 */

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	vcl->nrefs--;
	/* No garbage collection here, for the same reasons as in VCL_Rel. */
	Lck_Unlock(&vcl_mtx);

	REPLACE(ref->desc, NULL);
	FREE_OBJ(ref);
}

/*--------------------------------------------------------------------
 */

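/*
 * Poll the transport of the top request to detect a client that is gone.
 * On reset the method call is failed and the request is marked so that
 * subsequent calls short-circuit as well.
 */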
static int
req_poll(struct worker *wrk, struct req *req)
{
	struct req *top;

	/* NB: Since a fail transition leads to vcl_synth, the request may be
	 * short-circuited twice.
	 */
	if (req->req_reset) {
		wrk->vpi->handling = VCL_RET_FAIL;
		return (-1);
	}

	top = req->top->topreq;
	CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

	if (!FEATURE(FEATURE_VCL_REQ_RESET))
		return (0);
	if (top->transport->poll == NULL)
		return (0);
	if (top->transport->poll(top) >= 0)
		return (0);

	VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
	wrk->stats->req_reset++;
	wrk->vpi->handling = VCL_RET_FAIL;
	req->req_reset = 1;
	return (-1);
}

/*--------------------------------------------------------------------
 * Method functions to call into VCL programs.
 *
 * Either the request or busyobject must be specified, but not both.
 * The workspace argument is where random VCL stuff gets space from.
 */

static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
	uintptr_t rws = 0, aws;
	struct vrt_ctx ctx;
	struct vbitmap *vbm;
	void *p;
	size_t sz;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	if (bo != NULL) {
		CHECK_OBJ(bo, BUSYOBJ_MAGIC);
		CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
		VCL_Bo2Ctx(&ctx, bo);
	}
	if (req != NULL) {
		if (bo != NULL)
			assert(method == VCL_MET_PIPE);
		CHECK_OBJ(req, REQ_MAGIC);
		CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
		CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
		CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
		if (req_poll(wrk, req))
			return;
		VCL_Req2Ctx(&ctx, req);
	}
	assert(ctx.now != 0);
	ctx.specific = specific;
	ctx.method = method;
	if (track_call > 0) {
		rws = WS_Snapshot(wrk->aws);
		sz = VBITMAP_SZ(track_call);
		p = WS_Alloc(wrk->aws, sz);
		// No use to attempt graceful failure, all VCL calls will fail
		AN(p);
		vbm = vbit_init(p, sz);
		ctx.called = vbm;
	}
	aws = WS_Snapshot(wrk->aws);
	wrk->cur_method = method;
	wrk->seen_methods |= method;
	AN(ctx.vsl);
	VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
	func(&ctx, VSUB_STATIC, NULL);
	VSLbs(ctx.vsl, SLT_VCL_return,
	    TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
	wrk->cur_method |= 1;		// Magic marker
	if (wrk->vpi->handling == VCL_RET_FAIL)
		wrk->stats->vcl_fail++;

	/*
	 * VCL/Vmods are not allowed to make permanent allocations from
	 * wrk->aws, but they can reserve and return from it.
	 */
	assert(aws == WS_Snapshot(wrk->aws));
	if (rws != 0)
		WS_Reset(wrk->aws, rws);
}

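/*
 * Generate one VCL_<method>_method() wrapper per VCL method from
 * tbl/vcl_returns.h; each wrapper asserts that the action returned by the
 * VCL program is legal for that method.
 */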
#define VCL_MET_MAC(func, upper, typ, bitmap)				\
void									\
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,		\
     struct req *req, struct busyobj *bo, void *specific)		\
{									\
									\
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);				\
	CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);			\
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);				\
	vcl_call_method(wrk, req, bo, specific,				\
	    VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
	AN((1U << wrk->vpi->handling) & bitmap);			\
}

#include "tbl/vcl_returns.h"

/*--------------------------------------------------------------------
 */

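/*
 * VRT_check_call() asks a compiled sub whether a dynamic call would be
 * legal from the current context and returns an error string, or NULL if
 * it is allowed; VRT_call() performs the actual dynamic call.
 */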
VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
	VCL_STRING err = NULL;
	enum vcl_func_fail_e fail;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

	AN(sub->func);
	sub->func(ctx, VSUB_CHECK, &fail);

	switch (fail) {
	case VSUB_E_OK:
		break;
	case VSUB_E_METHOD:
		err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
		    " not allowed from here", sub->name);
		if (err == NULL)
			err = "Dynamic call not allowed and workspace overflow";
		break;
	case VSUB_E_RECURSE:
		err = WS_Printf(ctx->ws, "Recursive dynamic call to"
		    " \"sub %s{}\"", sub->name);
		if (err == NULL)
			err = "Recursive dynamic call and workspace overflow";
		break;
	default:
		INCOMPL();
	}

	return (err);
}

VCL_VOID
VRT_call(VRT_CTX, VCL_SUB sub)
{

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

	AZ(VRT_handled(ctx));
	AN(sub->func);
	sub->func(ctx, VSUB_DYNAMIC, NULL);
}