varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "vcl.h"
#include "vtim.h"
#include "vbm.h"

#include "cache_director.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "vcc_interface.h"

/*--------------------------------------------------------------------*/

const char *
VCL_Return_Name(unsigned r)
{

        switch (r) {
#define VCL_RET_MAC(l, U, B)    \
        case VCL_RET_##U:       \
                return(#l);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}

const char *
VCL_Method_Name(unsigned m)
{

        switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)   \
        case VCL_MET_##upper:                   \
                return (#upper);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}

/*--------------------------------------------------------------------*/

void
VCL_Refresh(struct vcl **vcc)
{

        while (vcl_active == NULL)
                (void)usleep(100000);

        ASSERT_VCL_ACTIVE();
        if (*vcc == vcl_active)
                return;

        VCL_Update(vcc, NULL);
}

void
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
{

        AN(wrk);
        AN(vclp);
        CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
        ASSERT_VCL_ACTIVE();

        if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
                VCL_Rel(vclp);
                return;
        }
        if (wrk->wpriv->vcl != NULL)
                VCL_Rel(&wrk->wpriv->vcl);
        wrk->wpriv->vcl = *vclp;
        *vclp = NULL;
}

void
VCL_Ref(struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(!vcl->temp->is_cold);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy++;
        Lck_Unlock(&vcl_mtx);
}

void
VCL_Rel(struct vcl **vcc)
{
        struct vcl *vcl;

        TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy--;
        /*
         * We do not garbage collect discarded VCLs here; that happens
         * in VCL_Poll(), which is called from the CLI thread.
         */
        Lck_Unlock(&vcl_mtx);
}
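
/*
 * Summary of the reference handling above: VCL_Refresh() hands out a busy
 * reference on the currently active VCL, VCL_Ref() adds a reference to a
 * VCL that is already referenced and not cold, VCL_Rel() gives one back,
 * and VCL_Recache() parks a reference to the still-active VCL on the worker
 * for later reuse (or releases it when the VCL is no longer active).
 */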

/*--------------------------------------------------------------------*/

static void
vcldir_free(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
        AZ(vdir->refcnt);
        Lck_Delete(&vdir->dlck);
        free(vdir->cli_name);
        FREE_OBJ(vdir->dir);
        FREE_OBJ(vdir);
}

static VCL_BACKEND
vcldir_surplus(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        vdir->refcnt = 0;
        vcldir_free(vdir);
        return (NULL);
}

VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
        struct vsb *vsb;
        struct vcl *vcl;
        struct vcldir *vdir;
        const struct vcltemp *temp;
        va_list ap;
        int i;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
        AN(fmt);
        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        // opportunistic check, re-checked under lock below
        if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
                return (NULL);

        ALLOC_OBJ(vdir, VCLDIR_MAGIC);
        AN(vdir);
        ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
        AN(vdir->dir);
        vdir->dir->vdir = vdir;

        vdir->methods = m;
        vdir->dir->priv = priv;
        vsb = VSB_new_auto();
        AN(vsb);
        VSB_printf(vsb, "%s.", VCL_Name(vcl));
        i = VSB_len(vsb);
        va_start(ap, fmt);
        VSB_vprintf(vsb, fmt, ap);
        va_end(ap);
        AZ(VSB_finish(vsb));
        REPLACE(vdir->cli_name, VSB_data(vsb));
        VSB_destroy(&vsb);
        vdir->dir->vcl_name = vdir->cli_name + i;

        vdir->vcl = vcl;
        vdir->admin_health = VDI_AH_AUTO;
        vdir->health_changed = VTIM_real();

        vdir->refcnt++;
        Lck_New(&vdir->dlck, lck_director);
        vdir->dir->mtx = &vdir->dlck;

        /* NB: We only look at the VCL temperature here, after going through
         * the trouble of creating the director, even though it might not be
         * legal to do so. Because we change the VCL temperature before
         * sending COLD events, we have to tolerate and undo such attempts
         * for the COOLING case.
         *
         * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
         * if the vcl is busy (ref vcl_set_state())
         */

        while (1) {
                temp = vcl->temp;
                if (temp == VCL_TEMP_COOLING)
                        return (vcldir_surplus(vdir));
                if (vcl->busy == 0 && vcl->temp->is_warm) {
                        if (! Lck_Trylock(&vcl_mtx))
                                break;
                        usleep(10 * 1000);
                        continue;
                }
                Lck_Lock(&vcl_mtx);
                break;
        }
        Lck_AssertHeld(&vcl_mtx);
        temp = vcl->temp;
        if (temp != VCL_TEMP_COOLING)
                VTAILQ_INSERT_TAIL(&vcl->director_list, vdir, list);
        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_WARM);
        Lck_Unlock(&vcl_mtx);

        if (temp == VCL_TEMP_COOLING)
                return (vcldir_surplus(vdir));

        if (!temp->is_warm && temp != VCL_TEMP_INIT)
                WRONG("Dynamic Backends can only be added to warm VCLs");

        return (vdir->dir);
}
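
/*
 * Illustrative sketch only (xyz_methods and xyz_state are hypothetical
 * VMOD-side names, not part of this file): a VMOD creating a dynamic
 * director would typically pair the two calls roughly like this:
 *
 *      VCL_BACKEND d;
 *
 *      d = VRT_AddDirector(ctx, &xyz_methods, xyz_state, "%s", "myname");
 *      if (d == NULL)
 *              return;                 // VCL was cooling, nothing created
 *      ...
 *      VRT_DelDirector(&d);            // drops the creation reference
 */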

void
VRT_StaticDirector(VCL_BACKEND b)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
        vdir = b->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
        vdir->flags |= VDIR_FLG_NOREFCNT;
}
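
/*
 * NB: a director marked VDIR_FLG_NOREFCNT is exempt from the per-assignment
 * reference counting done in VRT_Assign_Backend() below; only the single
 * creation reference remains, which VRT_DelDirector() eventually drops.
 * This is meant for directors whose lifetime is tied to their VCL, typically
 * backends declared in the VCL program itself.
 */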

static void
vcldir_retire(struct vcldir *vdir)
{
        const struct vcltemp *temp;

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 0);
        CHECK_OBJ_NOTNULL(vdir->vcl, VCL_MAGIC);

        Lck_Lock(&vcl_mtx);
        temp = vdir->vcl->temp;
        VTAILQ_REMOVE(&vdir->vcl->director_list, vdir, list);
        Lck_Unlock(&vcl_mtx);

        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_COLD);
        if (vdir->methods->destroy != NULL)
                vdir->methods->destroy(vdir->dir);
        vcldir_free(vdir);
}

static int
vcldir_deref(struct vcldir *vdir)
{
        int busy;

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);

        Lck_Lock(&vdir->dlck);
        assert(vdir->refcnt > 0);
        busy = --vdir->refcnt;
        Lck_Unlock(&vdir->dlck);

        if (!busy)
                vcldir_retire(vdir);
        return (busy);
}

void
VRT_DelDirector(VCL_BACKEND *dirp)
{
        VCL_BACKEND dir;
        struct vcldir *vdir;

        TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);

        vdir = dir->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        if (vdir->methods->release != NULL)
                vdir->methods->release(vdir->dir);

        if (vdir->flags & VDIR_FLG_NOREFCNT) {
                vdir->flags &= ~VDIR_FLG_NOREFCNT;
                AZ(vcldir_deref(vdir));
        } else {
                (void) vcldir_deref(vdir);
        }
}

void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
        struct vcldir *vdir;
        VCL_BACKEND tmp;

        AN(dst);
        CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
        CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
        if (*dst == src)
                return;
        tmp = *dst;
        *dst = src;
        if (src != NULL) {
                vdir = src->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
                        Lck_Lock(&vdir->dlck);
                        assert(vdir->refcnt > 0);
                        vdir->refcnt++;
                        Lck_Unlock(&vdir->dlck);
                }
        }
        if (tmp != NULL) {
                vdir = tmp->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT))
                        (void)vcldir_deref(vdir);
        }
}
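
/*
 * Sketch of the intended use (the local variable is hypothetical): backend
 * references are taken and dropped through assignment only, never by
 * touching the reference count directly:
 *
 *      VCL_BACKEND d = NULL;
 *
 *      VRT_Assign_Backend(&d, src);    // takes a reference on src
 *      ...
 *      VRT_Assign_Backend(&d, NULL);   // drops that reference again
 */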

void
VRT_DisableDirector(VCL_BACKEND d)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        vdir = d->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        vdir->admin_health = VDI_AH_DELETED;
        vdir->health_changed = VTIM_real();
}

VCL_BACKEND
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
{
        struct vcl *vcl;
        struct vcldir *vdir;
        VCL_BACKEND dd, d = NULL;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(name);

        assert(ctx->method & VCL_MET_TASK_H);
        ASSERT_CLI();

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        Lck_Lock(&vcl_mtx);
        VTAILQ_FOREACH(vdir, &vcl->director_list, list) {
                dd = vdir->dir;
                if (strcmp(dd->vcl_name, name))
                        continue;
                d = dd;
                break;
        }
        Lck_Unlock(&vcl_mtx);

        return (d);
}

/*--------------------------------------------------------------------*/

VCL_BACKEND
VCL_DefaultDirector(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (*vcl->conf->default_director);
}

const char *
VCL_Name(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        return (vcl->loaded_name);
}

VCL_PROBE
VCL_DefaultProbe(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (vcl->conf->default_probe);
}

/*--------------------------------------------------------------------*/

void
VRT_CTX_Assert(VRT_CTX)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

        if (ctx->msg != NULL)
                CHECK_OBJ(ctx->msg, VSB_MAGIC);
        else
                AN(ctx->vsl);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
        WS_Assert(ctx->ws);

        CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

        CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}

struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
        struct vclref* ref;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = ctx->vcl;
        REPLACE(ref->desc, desc);

        VCL_Ref(ctx->vcl);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        VCL_Rel(&vcl);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}

struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
        struct vcl *vcl;
        struct vclref* ref;

        ASSERT_CLI();
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(desc);
        AN(*desc);

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(vcl->temp->is_warm);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = vcl;
        REPLACE(ref->desc, desc);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
        vcl->nrefs++;
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        /* NB: A VCL may be released by a VMOD at any time, but it must happen
         * after a warmup and before the end of a cooldown. The release may or
         * may not happen while the same thread holds the temperature lock, so
         * instead we check that all references are gone in VCL_Nuke.
         */

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        vcl->nrefs--;
        /* No garbage collection here, for the same reasons as in VCL_Rel. */
        Lck_Unlock(&vcl_mtx);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}
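
/*
 * The two kinds of references above differ in what they block, as the names
 * suggest: a vclref from VRT_VCL_Prevent_Cold() holds a busy reference (via
 * VCL_Ref()) and thereby keeps the VCL warm, whereas a vclref from
 * VRT_VCL_Prevent_Discard() only bumps nrefs, which keeps the VCL from being
 * discarded but does not keep it from cooling down.
 */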

/*--------------------------------------------------------------------
 */
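
/*
 * req_poll() below asks the top request's transport whether the client is
 * still around. If the vcl_req_reset feature is enabled and the transport
 * reports the request gone, the task is short-circuited: the handling is
 * forced to VCL_RET_FAIL, the req_reset counter is bumped and the request
 * is marked reset so later method calls bail out immediately.
 */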

static int
req_poll(struct worker *wrk, struct req *req)
{
        struct req *top;

        /* NB: Since a fail transition leads to vcl_synth, the request may be
         * short-circuited twice.
         */
        if (req->req_reset) {
                wrk->vpi->handling = VCL_RET_FAIL;
                return (-1);
        }

        top = req->top->topreq;
        CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

        if (!FEATURE(FEATURE_VCL_REQ_RESET))
                return (0);
        if (top->transport->poll == NULL)
                return (0);
        if (top->transport->poll(top) >= 0)
                return (0);

        VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
        wrk->stats->req_reset++;
        wrk->vpi->handling = VCL_RET_FAIL;
        req->req_reset = 1;
        return (-1);
}

/*--------------------------------------------------------------------
 * Method functions to call into VCL programs.
 *
 * Either the request or busyobject must be specified, but not both.
 * The workspace argument is where random VCL stuff gets space from.
 */

static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
        uintptr_t rws = 0, aws;
        struct vrt_ctx ctx;
        struct vbitmap *vbm;
        void *p;
        size_t sz;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        INIT_OBJ(&ctx, VRT_CTX_MAGIC);
        if (bo != NULL) {
                CHECK_OBJ(bo, BUSYOBJ_MAGIC);
                CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
                VCL_Bo2Ctx(&ctx, bo);
        }
        if (req != NULL) {
                if (bo != NULL)
                        assert(method == VCL_MET_PIPE);
                CHECK_OBJ(req, REQ_MAGIC);
                CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
                CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
                CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
                if (req_poll(wrk, req))
                        return;
                VCL_Req2Ctx(&ctx, req);
        }
        assert(ctx.now != 0);
        ctx.specific = specific;
        ctx.method = method;
        if (track_call > 0) {
                rws = WS_Snapshot(wrk->aws);
                sz = VBITMAP_SZ(track_call);
                p = WS_Alloc(wrk->aws, sz);
                // No use to attempt graceful failure, all VCL calls will fail
                AN(p);
                vbm = vbit_init(p, sz);
                ctx.called = vbm;
        }
        aws = WS_Snapshot(wrk->aws);
        wrk->cur_method = method;
        wrk->seen_methods |= method;
        AN(ctx.vsl);
        VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
        func(&ctx, VSUB_STATIC, NULL);
        VSLbs(ctx.vsl, SLT_VCL_return,
            TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
        wrk->cur_method |= 1;           // Magic marker
        if (wrk->vpi->handling == VCL_RET_FAIL)
                wrk->stats->vcl_fail++;

        /*
         * VCL/Vmods are not allowed to make permanent allocations from
         * wrk->aws, but they can reserve and return from it.
         */
        assert(aws == WS_Snapshot(wrk->aws));
        if (rws != 0)
                WS_Reset(wrk->aws, rws);
}
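
/*
 * The macro below, expanded once per VCL method by the include of
 * tbl/vcl_returns.h, generates the VCL_*_method() wrappers (VCL_recv_method()
 * and friends). Each wrapper funnels into vcl_call_method() above and then
 * asserts that the returned action is in the set permitted for that method.
 */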

#define VCL_MET_MAC(func, upper, typ, bitmap)                           \
void                                                                    \
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,                \
     struct req *req, struct busyobj *bo, void *specific)               \
{                                                                       \
                                                                        \
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);                              \
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);                   \
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);                           \
        vcl_call_method(wrk, req, bo, specific,                         \
            VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
        AN((1U << wrk->vpi->handling) & bitmap);                        \
}

#include "tbl/vcl_returns.h"

/*--------------------------------------------------------------------
 */

VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
        VCL_STRING err = NULL;
        enum vcl_func_fail_e fail;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AN(sub->func);
        sub->func(ctx, VSUB_CHECK, &fail);

        switch (fail) {
        case VSUB_E_OK:
                break;
        case VSUB_E_METHOD:
                err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
                    " not allowed from here", sub->name);
                if (err == NULL)
                        err = "Dynamic call not allowed and workspace overflow";
                break;
        case VSUB_E_RECURSE:
                err = WS_Printf(ctx->ws, "Recursive dynamic call to"
                    " \"sub %s{}\"", sub->name);
                if (err == NULL)
                        err = "Recursive dynamic call and workspace overflow";
                break;
        default:
                INCOMPL();
        }

        return (err);
}
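
/*
 * Illustrative sketch (hypothetical caller code, not part of this file):
 * a dynamic call is usually guarded by VRT_check_call() so the caller can
 * report an error instead of failing outright:
 *
 *      VCL_STRING err = VRT_check_call(ctx, sub);
 *
 *      if (err != NULL)
 *              VRT_fail(ctx, "%s", err);
 *      else
 *              VRT_call(ctx, sub);
 */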

VCL_VOID
VRT_call(VRT_CTX, VCL_SUB sub)
{

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AZ(VRT_handled(ctx));
        AN(sub->func);
        sub->func(ctx, VSUB_DYNAMIC, NULL);
}