varnish-cache/lib/libvmod_directors/vmod_shard.c
1
/*-
2
 * Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
3
 * All rights reserved.
4
 *
5
 * Authors: Julian Wiesener <jw@uplex.de>
6
 *          Nils Goroll <slink@uplex.de>
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 */
29
30
#include "config.h"
31
32
#include <stdlib.h>
33
#include <string.h>
34
35
#include "cache/cache.h"
36
#include "vcl.h"
37
38
#include "vend.h"
39
40
#include "vcc_if.h"
41
#include "shard_dir.h"
42
#include "shard_cfg.h"
43
44
/* -------------------------------------------------------------------------
45
 *  shard director: LAZY mode (vdi resolve function), parameter objects
46
 *
47
 *  By associating a parameter object with a shard director, we enable LAZY
48
 *  lookups as with the other directors. Parameter objects are defined with VCL
49
 *  scope (normal vmod objects), but can be overridden per backend request using
50
 *  a task priv.
51
 *
52
 *  We use the same concept to carry shard.backend() parameters to vdi resolve
53
 *  for LAZY mode: They get saved in a per-director task scope parameter object.
54
 *
55
 *  Each object points to another object providing defaults for values which are
56
 *  not defined.
57
 *
58
 *  Actual resolution of the various parameter objects does not happen before
59
 *  they are used, which enables changing them independently (ie, shard
60
 *  .backend() parameters have precedence over an associated parameter object,
61
 *  which by itself can be overridden).
62
 *
63
 *  Overview of parameter objects (pointers are alternatives)
64
 *
65
 *  shard() director        shard_param() object    default param
66
 *
67
 *               --------------------------------->   vmod static
68
 *    VCL obj   /                                ->
69
 *    .param  -+--------->    VCL obj           /  _
70
 *                            .default  --------   /|
71
 *                                                /
72
 *                               ^               /
73
 *                               |              /
74
 *                                             /
75
 *                            .default        /
76
 *          ------------->    TASK priv      /
77
 *         /                                /
78
 *    .default -----------------------------
79
 *    TASK priv
80
 */
81
82
/* -------------------------------------------------------------------------
83
 * method arguments and set parameters bitmask in vmod_directors_shard_param
84
 */
85
86
#define arg_by          ((uint32_t)1)
87
#define arg_key         ((uint32_t)1 << 1)
88
#define arg_key_blob    ((uint32_t)1 << 2)
89
#define arg_alt         ((uint32_t)1 << 3)
90
#define arg_warmup      ((uint32_t)1 << 4)
91
#define arg_rampup      ((uint32_t)1 << 5)
92
#define arg_healthy     ((uint32_t)1 << 6)
93
#define arg_param       ((uint32_t)1 << 7)
94
#define arg_resolve     ((uint32_t)1 << 8)
95
#define _arg_mask       ((arg_resolve << 1) - 1)
96
/* allowed in shard_param.set */
97
#define _arg_mask_set   (arg_param - 1)
98
/* allowed in shard_param */
99
#define _arg_mask_param ( _arg_mask_set         \
100
                          & ~arg_key                    \
101
                          & ~arg_key_blob )
102
103
/* -------------------------------------------------------------------------
104
 * shard parameters - declaration & defaults
105
 */
106
enum vmod_directors_shard_param_scope {
107
        _SCOPE_INVALID = 0,
108
        SCOPE_VMOD,
109
        SCOPE_VCL,
110
        SCOPE_TASK,
111
        SCOPE_STACK
112
};
113
114
struct vmod_directors_shard_param;
115
116
struct vmod_directors_shard_param {
117
        unsigned                                magic;
118
#define VMOD_SHARD_SHARD_PARAM_MAGIC            0xdf5ca117
119
120
        /* internals */
121
        uint32_t                                key;
122
        const char                              *vcl_name;
123
        const struct vmod_directors_shard_param *defaults;
124
        enum vmod_directors_shard_param_scope   scope;
125
126
        /* parameters */
127
        enum by_e                               by;
128
        enum healthy_e                          healthy;
129
        uint32_t                                mask;
130
        VCL_BOOL                                rampup;
131
        VCL_INT                                 alt;
132
        VCL_REAL                                warmup;
133
};
134
135
static const struct vmod_directors_shard_param shard_param_default = {
136
        .magic          = VMOD_SHARD_SHARD_PARAM_MAGIC,
137
138
        .key            = 0,
139
        .vcl_name       = "builtin defaults",
140
        .defaults       = NULL,
141
        .scope          = SCOPE_VMOD,
142
143
        .mask           = _arg_mask_param,
144
        .by             = BY_HASH,
145
        .healthy        = CHOSEN,
146
        .rampup = 1,
147
        .alt            = 0,
148
        .warmup         = -1,
149
};
150
151
static struct vmod_directors_shard_param *
152
shard_param_stack(struct vmod_directors_shard_param *p,
153
    const struct vmod_directors_shard_param *pa, const char *who);
154
155
static struct vmod_directors_shard_param *
156
shard_param_task(VRT_CTX, const void *id,
157
    const struct vmod_directors_shard_param *pa);
158
159
static const struct vmod_directors_shard_param *
160
shard_param_blob(const VCL_BLOB blob);
161
162
static const struct vmod_directors_shard_param *
163
vmod_shard_param_read(VRT_CTX, const void *id,
164
    const struct vmod_directors_shard_param *p,
165
    struct vmod_directors_shard_param *pstk, const char *who);
166
167
/* -------------------------------------------------------------------------
168
 * shard vmod interface
169
 */
170
static vdi_healthy_f vmod_shard_healthy;
171
static vdi_resolve_f vmod_shard_resolve;
172
173
struct vmod_directors_shard {
174
        unsigned                                magic;
175
#define VMOD_SHARD_SHARD_MAGIC                  0x6e63e1bf
176
        struct sharddir                         *shardd;
177
        VCL_BACKEND                             dir;
178
};
179
180
/*
 * Map a VCL_ENUM argument to the internal by_e enum.
 *
 * VCL_ENUMs are interned strings, so pointer comparison against the
 * vmod_enum_* symbols (one VMODENUM entry per line of tbl_by.h)
 * suffices. Falling off the end means the VCC-generated glue passed
 * an enum not in the table - a programming error, hence WRONG().
 */
static enum by_e
parse_by_e(VCL_ENUM e)
{
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(BY_ ## n);
#include "tbl_by.h"
        WRONG("illegal by enum");
}
187
188
/*
 * Map a VCL_ENUM argument to the internal healthy_e enum
 * (pointer comparison against interned enum strings, as in
 * parse_by_e()).
 */
static enum healthy_e
parse_healthy_e(VCL_ENUM e)
{
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(n);
#include "tbl_healthy.h"
        WRONG("illegal healthy enum");
}
195
196
/*
 * Map a VCL_ENUM argument to the internal resolve_e enum
 * (pointer comparison against interned enum strings, as in
 * parse_by_e()).
 */
static enum resolve_e
parse_resolve_e(VCL_ENUM e)
{
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(n);
#include "tbl_resolve.h"
        WRONG("illegal resolve enum");
}
203
204
/* printable names for by_e values, indexed by the enum (for messages) */
static const char * const by_str[_BY_E_MAX] = {
        [_BY_E_INVALID] = "*INVALID*",
#define VMODENUM(n) [BY_ ## n] = #n,
#include "tbl_by.h"
};
209
210
/* printable names for healthy_e values, indexed by the enum */
static const char * const healthy_str[_HEALTHY_E_MAX] = {
        [_HEALTHY_E_INVALID] = "*INVALID*",
#define VMODENUM(n) [n] = #n,
#include "tbl_healthy.h"
};
215
216
static void
217 84
shard__assert(void)
218
{
219
        VCL_INT t1;
220
        uint32_t t2a, t2b;
221
222
        /* we put our uint32 key in a VCL_INT container */
223
        assert(sizeof(VCL_INT) >= sizeof(uint32_t));
224 84
        t2a = UINT32_MAX;
225 84
        t1 = (VCL_INT)t2a;
226 84
        t2b = (uint32_t)t1;
227 84
        assert(t2a == t2b);
228 84
}
229
230
/*
 * vdi_destroy_f callback: release the sharddir attached to this
 * director when varnishd deletes the director.
 */
static void v_matchproto_(vdi_destroy_f)
vmod_shard_destroy(VCL_BACKEND dir)
{
        struct sharddir *shardd;

        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        sharddir_delete(&shardd);
}
238
239
/*
 * Director method table handed to VRT_AddDirector(): LAZY resolution
 * goes through vmod_shard_resolve(), health checks through
 * vmod_shard_healthy(), teardown through vmod_shard_destroy().
 */
static const struct vdi_methods vmod_shard_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "shard",
        .resolve =      vmod_shard_resolve,
        .healthy =      vmod_shard_healthy,
        .destroy =      vmod_shard_destroy
}};
246
247
248
/*
 * VCL object constructor for the shard director.
 *
 * Allocates the wrapper, creates the underlying sharddir seeded with
 * the built-in parameter defaults, and registers the director with
 * varnishd under vcl_name.
 */
VCL_VOID v_matchproto_(td_directors_shard__init)
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
    const char *vcl_name)
{
        struct vmod_directors_shard *vshard;

        shard__assert();

        AN(vshardp);
        AZ(*vshardp);
        ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
        AN(vshard);

        *vshardp = vshard;
        sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);

        vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
            "%s", vcl_name);
}
267
268
/*
 * VCL object destructor: deregister the director and free the
 * wrapper. A NULL *vshardp is tolerated (see the ticket referenced
 * below) and simply ignored.
 */
VCL_VOID v_matchproto_(td_directors_shard__fini)
vmod_shard__fini(struct vmod_directors_shard **vshardp)
{
        struct vmod_directors_shard *vshard;

        // XXX 2297
        if (*vshardp == NULL)
                return;

        TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
        VRT_DelDirector(&vshard->dir);
        FREE_OBJ(vshard);
}
281
282
/*
 * .key(): hash the (varargs) string arguments into a 32 bit shard
 * key, returned in a VCL_INT container (round-trip safety is checked
 * by shard__assert()). Neither ctx nor the object state is used.
 */
VCL_INT v_matchproto_(td_directors_shard_key)
vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, const char *s, ...)
{
        va_list ap;
        uint32_t r;

        (void)ctx;
        (void)vshard;

        va_start(ap, s);
        r = sharddir_sha256v(s, ap);
        va_end(ap);

        return ((VCL_INT)r);
}
297
298
VCL_VOID v_matchproto_(td_directors_set_warmup)
299 0
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
300
    VCL_REAL probability)
301
{
302 0
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
303 0
        if (probability < 0 || probability >= 1) {
304 0
                shard_err(ctx, vshard->shardd,
305
                    ".set_warmup(%f) ignored", probability);
306 0
                return;
307
        }
308 0
        shardcfg_set_warmup(vshard->shardd, probability);
309
}
310
311
/*
 * .set_rampup(): set the director-wide rampup duration; validation,
 * if any, is left to shardcfg_set_rampup().
 */
VCL_VOID v_matchproto_(td_directors_set_rampup)
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_DURATION duration)
{
        (void)ctx;
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardcfg_set_rampup(vshard->shardd, duration);
}
319
320
VCL_VOID v_matchproto_(td_directors_shard_associate)
321 12
vmod_shard_associate(VRT_CTX,
322
    struct vmod_directors_shard *vshard, VCL_BLOB b)
323
{
324
        const struct vmod_directors_shard_param *ppt;
325 12
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
326
327 12
        if (b == NULL) {
328 0
                sharddir_set_param(vshard->shardd, &shard_param_default);
329 0
                return;
330
        }
331
332 12
        ppt = shard_param_blob(b);
333
334 12
        if (ppt == NULL) {
335 4
                VRT_fail(ctx, "shard .associate param invalid");
336 4
                return;
337
        }
338
339 8
        sharddir_set_param(vshard->shardd, ppt);
340
}
341
342
VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
343 328
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
344
    struct vmod_shard_add_backend_arg *args)
345
{
346 328
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
347
348 328
        if (args->backend == NULL) {
349 0
                shard_err0(ctx, vshard->shardd,
350
                    ".backend_add() NULL backend given");
351 0
                return (0);
352
        }
353
354 984
        return shardcfg_add_backend(ctx, args->arg1,
355 328
            vshard->shardd, args->backend,
356 328
            args->valid_ident ? args->ident : NULL,
357 328
            args->valid_rampup ? args->rampup : nan(""));
358
}
359
360
VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
361 32
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
362
    struct vmod_shard_remove_backend_arg *args)
363
{
364 32
        VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
365 32
        VCL_STRING ident = args->valid_ident ? args->ident : NULL;
366
367 32
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
368
369 32
        if (be == NULL && ident == NULL) {
370 0
                shard_err0(ctx, vshard->shardd,
371
                    ".backend_remove() at least one of backend "
372
                    "and ident must be given");
373 0
                return 0;
374
        }
375
376 32
        return shardcfg_remove_backend(ctx, args->arg1, vshard->shardd,
377
            be, ident);
378
}
379
380
VCL_BOOL v_matchproto_(td_directors_shard_clear)
381 56
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard,
382
    struct vmod_priv *priv)
383
{
384 56
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
385 56
        return shardcfg_clear(ctx, priv, vshard->shardd);
386
}
387
388
VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
389 152
vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
390
    struct vmod_priv *priv, VCL_INT replicas)
391
{
392 152
        return shardcfg_reconfigure(ctx, priv, vshard->shardd, replicas);
393
}
394
395
/*
 * Compute the 32 bit shard key from fully merged parameters p.
 *
 * BY_HASH in backend context reads the cache hash (bo->digest);
 * otherwise it deliberately falls through to BY_URL, which hashes
 * the URL of the request (client context) or bereq (backend
 * context). BY_KEY and BY_BLOB use the key precomputed by
 * shard_param_args()/merge.
 */
static inline uint32_t
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
{
        struct http *http;

        switch (p->by) {
        case BY_HASH:
                if (ctx->bo) {
                        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
                        return (vbe32dec(ctx->bo->digest));
                }
                /* FALLTHROUGH */
        case BY_URL:
                if (ctx->http_req) {
                        AN(http = ctx->http_req);
                } else {
                        AN(ctx->http_bereq);
                        AN(http = ctx->http_bereq);
                }
                return (sharddir_sha256(http->hd[HTTP_HDR_URL].b,
                                        vrt_magic_string_end));
        case BY_KEY:
        case BY_BLOB:
                return (p->key);
        default:
                WRONG("by enum");
        }
}
423
424
/*
 * merge parameters to resolve all undef values
 * key is to be calculated after merging
 *
 * Copies into *to every parameter that "to" has not set (mask bit
 * clear) but "from" has, then recurses along from->defaults until
 * to->mask is complete. The chain terminates at the builtin defaults
 * (shard_param_default), whose mask covers all parameters, so the
 * recursion is bounded.
 */
static void
shard_param_merge(struct vmod_directors_shard_param *to,
                  const struct vmod_directors_shard_param *from)
{
        CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((to->mask & ~_arg_mask_param) == 0);

        /* already fully resolved: nothing to merge */
        if (to->mask == _arg_mask_param)
                return;

        CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((from->mask & ~_arg_mask_param) == 0);

        /* "by" travels with its key for the key-carrying variants */
        if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
                to->by = from->by;
                if (from->by == BY_KEY || from->by == BY_BLOB)
                        to->key = from->key;
        }

/* copy field from "from" iff unset in "to" but set in "from" */
#define mrg(to, from, field) do {                                       \
                if (((to)->mask & arg_ ## field) == 0 &&                \
                    ((from)->mask & arg_ ## field) != 0)                \
                        (to)->field = (from)->field;                    \
        } while(0)

        mrg(to, from, healthy);
        mrg(to, from, rampup);
        mrg(to, from, alt);
        mrg(to, from, warmup);
#undef mrg

        to->mask |= from->mask;

        if (to->mask == _arg_mask_param)
                return;

        /* still incomplete: keep walking the defaults chain */
        AN(from->defaults);
        shard_param_merge(to, from->defaults);
}
467
468
/*
 * Reduce a non-empty BLOB to a uint32 shard key: the first (up to)
 * four bytes are read as a big-endian number; blobs shorter than four
 * bytes are right-aligned into the buffer (zero-padded at the top).
 */
static uint32_t
shard_blob_key(VCL_BLOB key_blob)
{
        uint8_t k[4] = { 0 };
        uint8_t *b;
        int i, ki;

        assert(key_blob);
        assert(key_blob->len > 0);
        assert(key_blob->priv != NULL);

        /* ki = start index into k: 0 for long blobs, right-align short ones */
        if (key_blob->len >= 4)
                ki = 0;
        else
                ki = 4 - key_blob->len;

        b = key_blob->priv;
        for (i = 0; ki < 4; i++, ki++)
                k[ki] = b[i];
        assert(i <= key_blob->len);

        return (vbe32dec(k));
}
491
492
/*
 * convert vmod interface valid_* to our bitmask
 */

/* one arg_* bit per VCC-generated valid_* flag */
#define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)

/*
 * Collect the arg_* bits for the optional arguments actually passed
 * to shard.backend().
 */
static uint32_t
shard_backend_arg_mask(const struct vmod_shard_backend_arg * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy)       |
                tobit(a, param)         |
                tobit(a, resolve));
}
511
/*
 * Collect the arg_* bits for the optional arguments actually passed
 * to shard_param.set() (no param/resolve here - those are
 * .backend()-only, cf. _arg_mask_set).
 */
static uint32_t
shard_param_set_mask(const struct vmod_shard_param_set_arg * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy));
}
#undef tobit
523
524
/*
 * check arguments and return in a struct param
 *
 * Validates the method arguments selected by the "args" bitmask and
 * stores them into *p, setting p->mask accordingly. On any invalid
 * combination the VCL is failed via VRT_fail() and NULL is returned
 * (except for an empty key_blob, which is only logged and treated as
 * key 0).
 */
static struct vmod_directors_shard_param *
shard_param_args(VRT_CTX,
    struct vmod_directors_shard_param *p, const char *who,
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
{
        enum by_e       by;
        enum healthy_e  healthy;

        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p->vcl_name);

        /* only parameters allowed in shard_param.set may be present */
        assert((args & ~_arg_mask_set) == 0);

        by = (args & arg_by) ? parse_by_e(by_s) : BY_HASH;
        healthy = (args & arg_healthy) ? parse_healthy_e(healthy_s) : CHOSEN;

        /* by_s / key_int / key_blob */
        if (args & arg_by) {
                switch (by) {
                case BY_KEY:
                        /* by=KEY requires the key argument, in uint32 range */
                        if ((args & arg_key) == 0) {
                                VRT_fail(ctx, "%s %s: "
                                         "missing key argument with by=%s",
                                         who, p->vcl_name, by_s);
                                return (NULL);
                        }
                        if (key_int < 0 || key_int > UINT32_MAX) {
                                VRT_fail(ctx, "%s %s: "
                                         "invalid key argument %jd with by=%s",
                                         who, p->vcl_name,
                                         (intmax_t)key_int, by_s);
                                return (NULL);
                        }
                        assert(key_int >= 0);
                        assert(key_int <= UINT32_MAX);
                        p->key = (uint32_t)key_int;
                        break;
                case BY_BLOB:
                        /* by=BLOB requires key_blob; empty blob -> key 0 */
                        if ((args & arg_key_blob) == 0) {
                                VRT_fail(ctx, "%s %s: "
                                         "missing key_blob argument with by=%s",
                                         who, p->vcl_name, by_s);
                                return (NULL);
                        }
                        if (key_blob == NULL || key_blob->len <= 0 ||
                            key_blob->priv == NULL) {
                                sharddir_err(ctx, SLT_Error, "%s %s: "
                                             "by=BLOB but no or empty key_blob "
                                             "- using key 0",
                                             who, p->vcl_name);
                                p->key = 0;
                        } else
                                p->key = shard_blob_key(key_blob);
                        break;
                case BY_HASH:
                case BY_URL:
                        /* key material comes from the hash/URL, not args */
                        if (args & (arg_key|arg_key_blob)) {
                                VRT_fail(ctx, "%s %s: "
                                         "key and key_blob arguments are "
                                         "invalid with by=%s",
                                         who, p->vcl_name, by_s);
                                return (NULL);
                        }
                        break;
                default:
                        WRONG("by enum");
                }
                p->by = by;
        } else {
                /* (args & arg_by) == 0 */
                p->by = BY_HASH;

                if (args & (arg_key|arg_key_blob)) {
                        VRT_fail(ctx, "%s %s: "
                                 "key and key_blob arguments are "
                                 "invalid with by=HASH (default)",
                                 who, p->vcl_name);
                        return (NULL);
                }
        }

        /* alt must be non-negative */
        if (args & arg_alt) {
                if (alt < 0) {
                        VRT_fail(ctx, "%s %s: "
                                 "invalid alt argument %jd",
                                 who, p->vcl_name, (intmax_t)alt);
                        return (NULL);
                }
                p->alt = alt;
        }

        /* warmup: [0,1] probability or the magic value -1 */
        if (args & arg_warmup) {
                if ((warmup < 0 && warmup != -1) || warmup > 1) {
                        VRT_fail(ctx, "%s %s: "
                                 "invalid warmup argument %f",
                                 who, p->vcl_name, warmup);
                        return (NULL);
                }
                p->warmup = warmup;
        }

        if (args & arg_rampup)
                p->rampup = !!rampup;

        if (args & arg_healthy)
                p->healthy = healthy;

        /* record which parameters were explicitly set (param bits only) */
        p->mask = args & _arg_mask_param;
        return (p);
}
638
639
/*
 * .backend(): select a backend from the shard director.
 *
 * resolve defaults to LAZY in vcl_init/vcl_fini (VCL_MET_TASK_H) and
 * NOW elsewhere. NOW merges the arguments over the director's
 * parameter defaults on the stack and picks a concrete backend
 * immediately. LAZY returns the director itself; any extra arguments
 * are stashed in a task-scoped parameter object for later use by
 * vmod_shard_resolve(), which is only possible in backend context.
 */
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
                   struct vmod_shard_backend_arg *a)
{
        struct vmod_directors_shard_param pstk;
        struct vmod_directors_shard_param *pp = NULL;
        const struct vmod_directors_shard_param *ppt;
        enum resolve_e resolve;
        uint32_t args = shard_backend_arg_mask(a);

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        assert((args & ~_arg_mask) == 0);

        if (args & arg_resolve)
                resolve = parse_resolve_e(a->resolve);
        else if (ctx->method & VCL_MET_TASK_H)
                resolve = LAZY;
        else
                resolve = NOW;

        switch (resolve) {
        case LAZY:
                /* no other arguments: the director by itself suffices */
                if ((args & ~arg_resolve) == 0) {
                        AN(vshard->dir);
                        return (vshard->dir);
                }

                if ((ctx->method & VCL_MET_TASK_B) == 0) {
                        VRT_fail(ctx, "shard .backend resolve=LAZY with other "
                                 "parameters can only be used in backend "
                                 "context");
                        return (NULL);
                }

                assert(ctx->method & VCL_MET_TASK_B);

                /* save arguments in a task-scoped parameter object */
                pp = shard_param_task(ctx, vshard->shardd,
                                      vshard->shardd->param);
                if (pp == NULL)
                        return (NULL);
                pp->vcl_name = vshard->shardd->name;
                break;
        case NOW:
                if (ctx->method & VCL_MET_TASK_H) {
                        VRT_fail(ctx,
                                 "shard .backend resolve=NOW can not be "
                                 "used in vcl_init{}/vcl_fini{}");
                        return (NULL);
                }
                /* stack-scoped params defaulting to the director's */
                pp = shard_param_stack(&pstk, vshard->shardd->param,
                                       vshard->shardd->name);
                break;
        default:
                WRONG("resolve enum");
        }

        AN(pp);
        /* an explicit param blob overrides the defaults chain */
        if (args & arg_param) {
                ppt = shard_param_blob(a->param);
                if (ppt == NULL) {
                        VRT_fail(ctx, "shard .backend param invalid");
                        return (NULL);
                }
                pp->defaults = ppt;
        }

        pp = shard_param_args(ctx, pp, "shard.backend()",
                              args & _arg_mask_set,
                              a->by, a->key, a->key_blob, a->alt, a->warmup,
                              a->rampup, a->healthy);
        if (pp == NULL)
                return (NULL);

        if (resolve == LAZY)
                return (vshard->dir);

        assert(resolve == NOW);
        shard_param_merge(pp, pp->defaults);
        return (sharddir_pick_be(ctx, vshard->shardd,
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
                                 pp->rampup, pp->healthy));
}
722
723
/*
 * vdi_healthy callback: the shard director is healthy if any of its
 * backends is.
 */
static VCL_BOOL v_matchproto_(vdi_healthy)
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
{
        struct sharddir *shardd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        return (sharddir_any_healthy(ctx, shardd, changed));
}
733
734
/*
 * vdi_resolve_f callback (LAZY mode): resolve the director to a
 * concrete backend at use time, reading the effective parameters
 * (task override or director defaults) into a stack struct first.
 */
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk[1];
        const struct vmod_directors_shard_param *pp;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        pp = vmod_shard_param_read(ctx, shardd, shardd->param,
                                   pstk, "shard_resolve");
        if (pp == NULL)
                return (NULL);

        return (sharddir_pick_be(ctx, shardd,
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
                                 pp->rampup, pp->healthy));
}
754
755
/*
 * .debug(): set the sharddir debug flags; i is truncated to 32 bits.
 */
VCL_VOID v_matchproto_(td_directors_shard_backend)
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_INT i)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        (void)ctx;
        sharddir_debug(vshard->shardd, i & UINT32_MAX);
}
764
765
/* =============================================================
766
 * shard_param
767
 */
768
769
/*
 * VCL object constructor for shard_param: a VCL-scoped parameter
 * object whose unset values fall back to the builtin defaults.
 */
VCL_VOID v_matchproto_(td_directors_shard_param__init)
vmod_shard_param__init(VRT_CTX,
    struct vmod_directors_shard_param **pp, const char *vcl_name)
{
        struct vmod_directors_shard_param *p;

        (void) ctx;
        AN(pp);
        AZ(*pp);
        ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p);
        p->vcl_name = vcl_name;
        p->scope = SCOPE_VCL;
        p->defaults = &shard_param_default;

        *pp = p;
}
786
787
/*
 * VCL object destructor for shard_param; a NULL *pp is tolerated
 * (see ticket reference below) and ignored.
 */
VCL_VOID v_matchproto_(td_directors_shard_param__fini)
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
{
        struct vmod_directors_shard_param *p;

        // XXX 2297
        if (*pp == NULL)
                return;

        TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        FREE_OBJ(p);
}
799
800
/*
 * init a stack param struct defaulting to pa with the given name
 *
 * The caller provides the storage (p); all values start unset, so any
 * lookup falls through to pa via the defaults chain.
 */
static struct vmod_directors_shard_param *
shard_param_stack(struct vmod_directors_shard_param *p,
    const struct vmod_directors_shard_param *pa, const char *who)
{
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        AN(p);
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        p->vcl_name = who;
        p->scope = SCOPE_STACK;
        p->defaults = pa;

        return (p);
}
818
/*
 * get a task scoped param struct for id defaulting to pa
 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
 *
 * The struct lives in priv_task storage keyed by id, so repeated
 * calls within one task return the same object. Memory comes from
 * the task workspace; exhaustion fails the VCL and returns NULL.
 */
static struct vmod_directors_shard_param *
shard_param_task(VRT_CTX, const void *id,
   const struct vmod_directors_shard_param *pa)
{
        struct vmod_directors_shard_param *p;
        struct vmod_priv *task;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        task = VRT_priv_task(ctx, id);

        if (task == NULL) {
                VRT_fail(ctx, "no priv_task");
                return (NULL);
        }

        /* already created earlier in this task: reuse it */
        if (task->priv) {
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
                assert(p->scope == SCOPE_TASK);
                return (p);
        }

        p = WS_Alloc(ctx->ws, sizeof *p);
        if (p == NULL) {
                VRT_fail(ctx, "shard_param_task WS_Alloc failed");
                return (NULL);
        }
        task->priv = p;
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        p->vcl_name = pa->vcl_name;
        p->scope = SCOPE_TASK;

        /* VCL-scoped defaults get their own task-scoped shadow copy */
        if (id == pa || pa->scope != SCOPE_VCL)
                p->defaults = pa;
        else
                p->defaults = shard_param_task(ctx, pa, pa);

        return (p);
}
863
864
static struct vmod_directors_shard_param *
865 380
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
866
    const char *who)
867
{
868 380
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
869 380
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
870
871 380
        if (ctx->method & VCL_MET_TASK_C) {
872 4
                VRT_fail(ctx, "%s may only be used "
873
                         "in vcl_init and in backend context", who);
874 4
                return (NULL);
875 376
        } else if (ctx->method & VCL_MET_TASK_B)
876 316
                p = shard_param_task(ctx, p, p);
877
        else
878 60
                assert(ctx->method & VCL_MET_TASK_H);
879
880 376
        return (p);
881
}
882
883
VCL_VOID v_matchproto_(td_directors_shard_param_set)
884 380
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
885
                     struct vmod_shard_param_set_arg *a)
886
{
887 380
        uint32_t args = shard_param_set_mask(a);
888
889 380
        assert((args & ~_arg_mask_set) == 0);
890
891 380
        p = shard_param_prep(ctx, p, "shard_param.set()");
892 380
        if (p == NULL)
893 4
                return;
894 376
        (void) shard_param_args(ctx, p, "shard_param.set()", args,
895
                                a->by, a->key, a->key_blob, a->alt, a->warmup,
896
                                a->rampup, a->healthy);
897
}
898
899
VCL_VOID v_matchproto_(td_directors_shard_param_clear)
900 0
vmod_shard_param_clear(VRT_CTX,
901
    struct vmod_directors_shard_param *p)
902
{
903 0
        p = shard_param_prep(ctx, p, "shard_param.clear()");
904 0
        if (p == NULL)
905 0
                return;
906 0
        p->mask = 0;
907
}
908
909
static const struct vmod_directors_shard_param *
910 636
vmod_shard_param_read(VRT_CTX, const void *id,
911
    const struct vmod_directors_shard_param *p,
912
    struct vmod_directors_shard_param *pstk, const char *who)
913
{
914
        struct vmod_directors_shard_param *pp;
915
916 636
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
917 636
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
918
        (void) who; // XXX
919
920 636
        if (ctx->method == 0 || (ctx->method & VCL_MET_TASK_B))
921 516
                p = shard_param_task(ctx, id, p);
922
923 636
        if (p == NULL)
924 0
                return (NULL);
925
926 636
        pp = shard_param_stack(pstk, p, p->vcl_name);
927 636
        AN(pp);
928 636
        shard_param_merge(pp, p);
929 636
        return (pp);
930
}
931
932
VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
933 100
vmod_shard_param_get_by(VRT_CTX,
934
    struct vmod_directors_shard_param *p)
935
{
936
        struct vmod_directors_shard_param pstk;
937
        const struct vmod_directors_shard_param *pp;
938
939 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk, "shard_param.get_by()");
940 100
        if (pp == NULL)
941 0
                return (NULL);
942 100
        assert(pp->by > _BY_E_INVALID);
943 100
        return (by_str[pp->by]);
944
}
945
946
VCL_INT v_matchproto_(td_directors_shard_param_get_key)
947 100
vmod_shard_param_get_key(VRT_CTX,
948
    struct vmod_directors_shard_param *p)
949
{
950
        struct vmod_directors_shard_param pstk;
951
        const struct vmod_directors_shard_param *pp;
952
953 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk, "shard_param.get_key()");
954 100
        if (pp == NULL)
955 0
                return (-1);
956 100
        return ((VCL_INT)shard_get_key(ctx, pp));
957
}
958
VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
959 100
vmod_shard_param_get_alt(VRT_CTX,
960
    struct vmod_directors_shard_param *p)
961
{
962
        struct vmod_directors_shard_param pstk;
963
        const struct vmod_directors_shard_param *pp;
964
965 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
966
                                   "shard_param.get_alt()");
967 100
        if (pp == NULL)
968 0
                return (-1);
969 100
        return (pp->alt);
970
}
971
972
VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
973 100
vmod_shard_param_get_warmup(VRT_CTX,
974
    struct vmod_directors_shard_param *p)
975
{
976
        struct vmod_directors_shard_param pstk;
977
        const struct vmod_directors_shard_param *pp;
978
979 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
980
                                   "shard_param.get_warmup()");
981 100
        if (pp == NULL)
982 0
                return (-2);
983 100
        return (pp->warmup);
984
}
985
986
VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
987 100
vmod_shard_param_get_rampup(VRT_CTX,
988
    struct vmod_directors_shard_param *p)
989
{
990
        struct vmod_directors_shard_param pstk;
991
        const struct vmod_directors_shard_param *pp;
992
993 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
994
                                   "shard_param.get_rampup()");
995 100
        if (pp == NULL)
996 0
                return (0);
997 100
        return (pp->rampup);
998
}
999
1000
VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
1001 100
vmod_shard_param_get_healthy(VRT_CTX,
1002
    struct vmod_directors_shard_param *p)
1003
{
1004
        struct vmod_directors_shard_param pstk;
1005
        const struct vmod_directors_shard_param *pp;
1006
1007 100
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
1008
                                   "shard_param.get_healthy()");
1009 100
        if (pp == NULL)
1010 0
                return (NULL);
1011 100
        assert(pp->healthy > _HEALTHY_E_INVALID);
1012 100
        return (healthy_str[pp->healthy]);
1013
1014
}
1015
1016
static const struct vmod_directors_shard_param *
1017 20
shard_param_blob(const VCL_BLOB blob)
1018
{
1019 40
        if (blob && blob->priv &&
1020 32
            blob->len == sizeof(struct vmod_directors_shard_param) &&
1021 12
            *(unsigned *)blob->priv == VMOD_SHARD_SHARD_PARAM_MAGIC)
1022 12
                return (blob->priv);
1023 8
        return (NULL);
1024
}
1025
1026
VCL_BLOB v_matchproto_(td_directors_shard_param_use)
1027 12
vmod_shard_param_use(VRT_CTX,
1028
    struct vmod_directors_shard_param *p)
1029
{
1030
        struct vmod_priv *blob;
1031
1032 12
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
1033 12
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
1034
1035 12
        blob = (void *)WS_Alloc(ctx->ws, sizeof *blob);
1036 12
        if (blob == NULL) {
1037 0
                VRT_fail(ctx, "Workspace overflow (param.use())");
1038 0
                return (NULL);
1039
        }
1040
1041 12
        memset(blob, 0, sizeof *blob);
1042 12
        blob->len = sizeof *p;
1043 12
        blob->priv = p;
1044
1045 12
        return (blob);
1046
}