varnish-cache/lib/libvmod_directors/vmod_shard.c
1
/*-
2
 * Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
3
 * All rights reserved.
4
 *
5
 * Authors: Julian Wiesener <jw@uplex.de>
6
 *          Nils Goroll <slink@uplex.de>
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 */
29
30
#include "config.h"
31
32
#include <stdlib.h>
33
#include <string.h>
34
35
#include "cache/cache.h"
36
#include "vcl.h"
37
38
#include "vend.h"
39
40
#include "vcc_if.h"
41
#include "shard_dir.h"
42
#include "shard_cfg.h"
43
44
/* -------------------------------------------------------------------------
45
 *  shard director: LAZY mode (vdi resolve function), parameter objects
46
 *
47
 *  By associating a parameter object with a shard director, we enable LAZY
48
 *  lookups as with the other directors. Parameter objects are defined with VCL
49
 *  scope (normal vmod objects), but can be overridden per backend request using
50
 *  a task priv.
51
 *
52
 *  We use the same concept to carry shard.backend() parameters to vdi resolve
53
 *  for LAZY mode: They get saved in a per-director task scope parameter object.
54
 *
55
 *  Each object points to another object providing defaults for values which are
56
 *  not defined.
57
 *
58
 *  Actual resolution of the various parameter objects does not happen before
59
 *  they are used, which enables changing them independently (ie, shard
60
 *  .backend() parameters have precedence over an associated parameter object,
61
 *  which by itself can be overridden).
62
 *
63
 *  Overview of parameter objects (pointers are alternatives)
64
 *
65
 *  shard() director        shard_param() object    default param
66
 *
67
 *               --------------------------------->   vmod static
68
 *    VCL obj   /                                ->
69
 *    .param  -+--------->    VCL obj           /  _
70
 *                            .default  --------   /|
71
 *                                                /
72
 *                               ^               /
73
 *                               |              /
74
 *                                             /
75
 *                            .default        /
76
 *          ------------->    TASK priv      /
77
 *         /                                /
78
 *    .default -----------------------------
79
 *    TASK priv
80
 */
81
82
/* -------------------------------------------------------------------------
83
 * method arguments and set parameters bitmask in vmod_directors_shard_param
84
 */
85
86
#define arg_by          ((uint32_t)1)
87
#define arg_key         ((uint32_t)1 << 1)
88
#define arg_key_blob    ((uint32_t)1 << 2)
89
#define arg_alt         ((uint32_t)1 << 3)
90
#define arg_warmup      ((uint32_t)1 << 4)
91
#define arg_rampup      ((uint32_t)1 << 5)
92
#define arg_healthy     ((uint32_t)1 << 6)
93
#define arg_param       ((uint32_t)1 << 7)
94
#define arg_resolve     ((uint32_t)1 << 8)
95
#define _arg_mask       ((arg_resolve << 1) - 1)
96
/* allowed in shard_param.set */
97
#define _arg_mask_set   (arg_param - 1)
98
/* allowed in shard_param */
99
#define _arg_mask_param ( _arg_mask_set         \
100
                          & ~arg_key                    \
101
                          & ~arg_key_blob )
102
103
/* -------------------------------------------------------------------------
104
 * shard parameters - declaration & defaults
105
 */
106
enum vmod_directors_shard_param_scope {
107
        _SCOPE_INVALID = 0,
108
        SCOPE_VMOD,
109
        SCOPE_VCL,
110
        SCOPE_TASK,
111
        SCOPE_STACK
112
};
113
114
struct vmod_directors_shard_param;
115
116
struct vmod_directors_shard_param {
117
        unsigned                                magic;
118
#define VMOD_SHARD_SHARD_PARAM_MAGIC            0xdf5ca117
119
120
        /* internals */
121
        uint32_t                                key;
122
        const char                              *vcl_name;
123
        const struct vmod_directors_shard_param *defaults;
124
        enum vmod_directors_shard_param_scope   scope;
125
126
        /* parameters */
127
        enum by_e                               by;
128
        enum healthy_e                          healthy;
129
        uint32_t                                mask;
130
        VCL_BOOL                                rampup;
131
        VCL_INT                                 alt;
132
        VCL_REAL                                warmup;
133
};
134
135
static const struct vmod_directors_shard_param shard_param_default = {
136
        .magic          = VMOD_SHARD_SHARD_PARAM_MAGIC,
137
138
        .key            = 0,
139
        .vcl_name       = "builtin defaults",
140
        .defaults       = NULL,
141
        .scope          = SCOPE_VMOD,
142
143
        .mask           = _arg_mask_param,
144
        .by             = BY_HASH,
145
        .healthy        = CHOSEN,
146
        .rampup = 1,
147
        .alt            = 0,
148
        .warmup         = -1,
149
};
150
151
static struct vmod_directors_shard_param *
152
shard_param_stack(struct vmod_directors_shard_param *p,
153
    const struct vmod_directors_shard_param *pa, const char *who);
154
155
static struct vmod_directors_shard_param *
156
shard_param_task(VRT_CTX, const void *id,
157
    const struct vmod_directors_shard_param *pa);
158
159
static const struct vmod_directors_shard_param *
160
shard_param_blob(const VCL_BLOB blob);
161
162
static const struct vmod_directors_shard_param *
163
vmod_shard_param_read(VRT_CTX, const void *id,
164
    const struct vmod_directors_shard_param *p,
165
    struct vmod_directors_shard_param *pstk, const char *who);
166
167
/* -------------------------------------------------------------------------
168
 * shard vmod interface
169
 */
170
static vdi_healthy_f vmod_shard_healthy;
171
static vdi_resolve_f vmod_shard_resolve;
172
173
struct vmod_directors_shard {
174
        unsigned                                magic;
175
#define VMOD_SHARD_SHARD_MAGIC                  0x6e63e1bf
176
        struct sharddir                         *shardd;
177
        VCL_BACKEND                             dir;
178
};
179
180
static enum by_e
181 116
parse_by_e(VCL_ENUM e)
182
{
183
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(BY_ ## n);
184
#include "tbl_by.h"
185 0
       WRONG("illegal by enum");
186
}
187
188
static enum healthy_e
189 12
parse_healthy_e(VCL_ENUM e)
190
{
191
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(n);
192
#include "tbl_healthy.h"
193 0
       WRONG("illegal healthy enum");
194
}
195
196
static enum resolve_e
197 12
parse_resolve_e(VCL_ENUM e)
198
{
199
#define VMODENUM(n) if (e == vmod_enum_ ## n) return(n);
200
#include "tbl_resolve.h"
201 0
       WRONG("illegal resolve enum");
202
}
203
204
/* printable names for enum by_e, used in error/log messages */
static const char * const by_str[_BY_E_MAX] = {
        [_BY_E_INVALID] = "*INVALID*",
#define VMODENUM(n) [BY_ ## n] = #n,
#include "tbl_by.h"
};
209
210
/* printable names for enum healthy_e, used in error/log messages */
static const char * const healthy_str[_HEALTHY_E_MAX] = {
        [_HEALTHY_E_INVALID] = "*INVALID*",
#define VMODENUM(n) [n] = #n,
#include "tbl_healthy.h"
};
215
216
static void
217 21
shard__assert(void)
218
{
219
        VCL_INT t1;
220
        uint32_t t2a, t2b;
221
222
        /* we put our uint32 key in a VCL_INT container */
223
        assert(sizeof(VCL_INT) >= sizeof(uint32_t));
224 21
        t2a = UINT32_MAX;
225 21
        t1 = (VCL_INT)t2a;
226 21
        t2b = (uint32_t)t1;
227 21
        assert(t2a == t2b);
228 21
}
229
230
static void v_matchproto_(vdi_destroy_f)
231 3
vmod_shard_destroy(VCL_BACKEND dir)
232
{
233
        struct sharddir *shardd;
234
235 3
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
236 3
        sharddir_delete(&shardd);
237 3
}
238
239
/* director method table registered with VRT_AddDirector() */
static const struct vdi_methods vmod_shard_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "shard",
        .resolve =      vmod_shard_resolve,
        .healthy =      vmod_shard_healthy,
        .destroy =      vmod_shard_destroy
}};
246
247
248
VCL_VOID v_matchproto_(td_directors_shard__init)
249 21
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
250
    const char *vcl_name)
251
{
252
        struct vmod_directors_shard *vshard;
253
254 21
        shard__assert();
255
256 21
        AN(vshardp);
257 21
        AZ(*vshardp);
258 21
        ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
259 21
        AN(vshard);
260
261 21
        *vshardp = vshard;
262 21
        sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);
263
264 21
        vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
265
            "%s", vcl_name);
266 21
}
267
268
VCL_VOID v_matchproto_(td_directors_shard__fini)
269 3
vmod_shard__fini(struct vmod_directors_shard **vshardp)
270
{
271
        struct vmod_directors_shard *vshard;
272
273 3
        TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
274 3
        VRT_DelDirector(&vshard->dir);
275 3
        FREE_OBJ(vshard);
276 3
}
277
278
VCL_INT v_matchproto_(td_directors_shard_key)
279 11
vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, const char *s, ...)
280
{
281
        va_list ap;
282
        uint32_t r;
283
284
        (void)ctx;
285
        (void)vshard;
286
287 11
        va_start(ap, s);
288 11
        r = sharddir_sha256v(s, ap);
289 11
        va_end(ap);
290
291 11
        return ((VCL_INT)r);
292
}
293
294
VCL_VOID v_matchproto_(td_directors_set_warmup)
295 0
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
296
    VCL_REAL probability)
297
{
298 0
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
299 0
        if (probability < 0 || probability >= 1) {
300 0
                shard_err(ctx, vshard->shardd,
301
                    ".set_warmup(%f) ignored", probability);
302 0
                return;
303
        }
304 0
        shardcfg_set_warmup(vshard->shardd, probability);
305
}
306
307
VCL_VOID v_matchproto_(td_directors_set_rampup)
308 1
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
309
    VCL_DURATION duration)
310
{
311
        (void)ctx;
312 1
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
313 1
        shardcfg_set_rampup(vshard->shardd, duration);
314 1
}
315
316
VCL_VOID v_matchproto_(td_directors_shard_associate)
317 3
vmod_shard_associate(VRT_CTX,
318
    struct vmod_directors_shard *vshard, VCL_BLOB b)
319
{
320
        const struct vmod_directors_shard_param *ppt;
321 3
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
322
323 3
        if (b == NULL) {
324 0
                sharddir_set_param(vshard->shardd, &shard_param_default);
325 0
                return;
326
        }
327
328 3
        ppt = shard_param_blob(b);
329
330 3
        if (ppt == NULL) {
331 1
                VRT_fail(ctx, "shard .associate param invalid");
332 1
                return;
333
        }
334
335 2
        sharddir_set_param(vshard->shardd, ppt);
336
}
337
338
VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
339 82
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
340
    struct vmod_shard_add_backend_arg *args)
341
{
342 82
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
343
344 82
        if (args->backend == NULL) {
345 0
                shard_err0(ctx, vshard->shardd,
346
                    ".backend_add() NULL backend given");
347 0
                return (0);
348
        }
349
350 246
        return shardcfg_add_backend(ctx, args->arg1,
351 82
            vshard->shardd, args->backend,
352 82
            args->valid_ident ? args->ident : NULL,
353 82
            args->valid_rampup ? args->rampup : nan(""));
354
}
355
356
VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
357 8
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
358
    struct vmod_shard_remove_backend_arg *args)
359
{
360 8
        VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
361 8
        VCL_STRING ident = args->valid_ident ? args->ident : NULL;
362
363 8
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
364
365 8
        if (be == NULL && ident == NULL) {
366 0
                shard_err0(ctx, vshard->shardd,
367
                    ".backend_remove() at least one of backend "
368
                    "and ident must be given");
369 0
                return 0;
370
        }
371
372 8
        return shardcfg_remove_backend(ctx, args->arg1, vshard->shardd,
373
            be, ident);
374
}
375
376
VCL_BOOL v_matchproto_(td_directors_shard_clear)
377 14
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard,
378
    struct vmod_priv *priv)
379
{
380 14
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
381 14
        return shardcfg_clear(ctx, priv, vshard->shardd);
382
}
383
384
VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
385 38
vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
386
    struct vmod_priv *priv, VCL_INT replicas)
387
{
388 38
        return shardcfg_reconfigure(ctx, priv, vshard->shardd, replicas);
389
}
390
391
static inline uint32_t
392 63
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
393
{
394
        struct http *http;
395
396 63
        switch (p->by) {
397
        case BY_HASH:
398 18
                if (ctx->bo) {
399 7
                        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
400 7
                        return (vbe32dec(ctx->bo->digest));
401
                }
402
                /* FALLTHROUGH */
403
        case BY_URL:
404 18
                if (ctx->http_req) {
405 12
                        AN(http = ctx->http_req);
406
                } else {
407 6
                        AN(ctx->http_bereq);
408 6
                        AN(http = ctx->http_bereq);
409
                }
410 18
                return (sharddir_sha256(http->hd[HTTP_HDR_URL].b,
411
                                        vrt_magic_string_end));
412
        case BY_KEY:
413
        case BY_BLOB:
414 38
                return (p->key);
415
        default:
416 0
                WRONG("by enum");
417
        }
418
}
419
420
/*
421
 * merge parameters to resolve all undef values
422
 * key is to be calculated after merging
423
 */
424
static void
425 478
shard_param_merge(struct vmod_directors_shard_param *to,
426
                  const struct vmod_directors_shard_param *from)
427
{
428 478
        CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
429 478
        assert((to->mask & ~_arg_mask_param) == 0);
430
431 478
        if (to->mask == _arg_mask_param)
432 0
                return;
433
434 478
        CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
435 478
        assert((from->mask & ~_arg_mask_param) == 0);
436
437 478
        if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
438 168
                to->by = from->by;
439 168
                if (from->by == BY_KEY || from->by == BY_BLOB)
440 63
                        to->key = from->key;
441
        }
442
443
#define mrg(to, from, field) do {                                       \
444
                if (((to)->mask & arg_ ## field) == 0 &&                \
445
                    ((from)->mask & arg_ ## field) != 0)                \
446
                        (to)->field = (from)->field;                    \
447
        } while(0)
448
449 478
        mrg(to, from, healthy);
450 478
        mrg(to, from, rampup);
451 478
        mrg(to, from, alt);
452 478
        mrg(to, from, warmup);
453
#undef mrg
454
455 478
        to->mask |= from->mask;
456
457 478
        if (to->mask == _arg_mask_param)
458 188
                return;
459
460 290
        AN(from->defaults);
461 290
        shard_param_merge(to, from->defaults);
462
}
463
464
static uint32_t
465 7
shard_blob_key(VCL_BLOB key_blob)
466
{
467 7
        uint8_t k[4] = { 0 };
468
        uint8_t *b;
469
        int i, ki;
470
471 7
        assert(key_blob);
472 7
        assert(key_blob->len > 0);
473 7
        assert(key_blob->priv != NULL);
474
475 7
        if (key_blob->len >= 4)
476 7
                ki = 0;
477
        else
478 0
                ki = 4 - key_blob->len;
479
480 7
        b = key_blob->priv;
481 35
        for (i = 0; ki < 4; i++, ki++)
482 28
                k[ki] = b[i];
483 7
        assert(i <= key_blob->len);
484
485 7
        return (vbe32dec(k));
486
}
487
488
/*
489
 * convert vmod interface valid_* to our bitmask
490
 */
491
492
#define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)
493
494
static uint32_t
495 43
shard_backend_arg_mask(const struct vmod_shard_backend_arg * const a)
496
{
497 86
        return (tobit(a, by)            |
498 86
                tobit(a, key)           |
499 86
                tobit(a, key_blob)      |
500 86
                tobit(a, alt)           |
501 86
                tobit(a, warmup)        |
502 86
                tobit(a, rampup)        |
503 86
                tobit(a, healthy)       |
504 86
                tobit(a, param)         |
505 43
                tobit(a, resolve));
506
}
507
static uint32_t
508 95
shard_param_set_mask(const struct vmod_shard_param_set_arg * const a)
509
{
510 190
        return (tobit(a, by)            |
511 190
                tobit(a, key)           |
512 190
                tobit(a, key_blob)      |
513 190
                tobit(a, alt)           |
514 190
                tobit(a, warmup)        |
515 190
                tobit(a, rampup)        |
516 95
                tobit(a, healthy));
517
}
518
#undef tobit
519
520
/*
521
 * check arguments and return in a struct param
522
 */
523
static struct vmod_directors_shard_param *
524 130
shard_param_args(VRT_CTX,
525
    struct vmod_directors_shard_param *p, const char *who,
526
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
527
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
528
{
529
        enum by_e       by;
530
        enum healthy_e  healthy;
531
532 130
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
533 130
        AN(p->vcl_name);
534
535 130
        assert((args & ~_arg_mask_set) == 0);
536
537 130
        by = (args & arg_by) ? parse_by_e(by_s) : BY_HASH;
538 130
        healthy = (args & arg_healthy) ? parse_healthy_e(healthy_s) : CHOSEN;
539
540
        /* by_s / key_int / key_blob */
541 130
        if (args & arg_by) {
542 116
                switch (by) {
543
                case BY_KEY:
544 54
                        if ((args & arg_key) == 0) {
545 1
                                VRT_fail(ctx, "%s %s: "
546
                                         "missing key argument with by=%s",
547
                                         who, p->vcl_name, by_s);
548 1
                                return (NULL);
549
                        }
550 53
                        if (key_int < 0 || key_int > UINT32_MAX) {
551 1
                                VRT_fail(ctx, "%s %s: "
552
                                         "invalid key argument %jd with by=%s",
553
                                         who, p->vcl_name,
554
                                         (intmax_t)key_int, by_s);
555 1
                                return (NULL);
556
                        }
557 52
                        assert(key_int >= 0);
558 52
                        assert(key_int <= UINT32_MAX);
559 52
                        p->key = (uint32_t)key_int;
560 52
                        break;
561
                case BY_BLOB:
562 9
                        if ((args & arg_key_blob) == 0) {
563 1
                                VRT_fail(ctx, "%s %s: "
564
                                         "missing key_blob argument with by=%s",
565
                                         who, p->vcl_name, by_s);
566 1
                                return (NULL);
567
                        }
568 15
                        if (key_blob == NULL || key_blob->len <= 0 ||
569 7
                            key_blob->priv == NULL) {
570 1
                                sharddir_err(ctx, SLT_Error, "%s %s: "
571
                                             "by=BLOB but no or empty key_blob "
572
                                             "- using key 0",
573
                                             who, p->vcl_name);
574 1
                                p->key = 0;
575
                        } else
576 7
                                p->key = shard_blob_key(key_blob);
577 8
                        break;
578
                case BY_HASH:
579
                case BY_URL:
580 53
                        if (args & (arg_key|arg_key_blob)) {
581 1
                                VRT_fail(ctx, "%s %s: "
582
                                         "key and key_blob arguments are "
583
                                         "invalid with by=%s",
584
                                         who, p->vcl_name, by_s);
585 1
                                return (NULL);
586
                        }
587 52
                        break;
588
                default:
589 0
                        WRONG("by enum");
590
                }
591 112
                p->by = by;
592
        } else {
593
                /* (args & arg_by) == 0 */
594 14
                p->by = BY_HASH;
595
596 14
                if (args & (arg_key|arg_key_blob)) {
597 1
                        VRT_fail(ctx, "%s %s: "
598
                                 "key and key_blob arguments are "
599
                                 "invalid with by=HASH (default)",
600
                                 who, p->vcl_name);
601 1
                        return (NULL);
602
                }
603
        }
604
605 125
        if (args & arg_alt) {
606 33
                if (alt < 0) {
607 1
                        VRT_fail(ctx, "%s %s: "
608
                                 "invalid alt argument %jd",
609
                                 who, p->vcl_name, (intmax_t)alt);
610 1
                        return (NULL);
611
                }
612 32
                p->alt = alt;
613
        }
614
615 124
        if (args & arg_warmup) {
616 18
                if ((warmup < 0 && warmup != -1) || warmup > 1) {
617 2
                        VRT_fail(ctx, "%s %s: "
618
                                 "invalid warmup argument %f",
619
                                 who, p->vcl_name, warmup);
620 2
                        return (NULL);
621
                }
622 16
                p->warmup = warmup;
623
        }
624
625 122
        if (args & arg_rampup)
626 6
                p->rampup = !!rampup;
627
628 122
        if (args & arg_healthy)
629 12
                p->healthy = healthy;
630
631 122
        p->mask = args & _arg_mask_param;
632 122
        return (p);
633
}
634
635
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
636 43
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
637
                   struct vmod_shard_backend_arg *a)
638
{
639
        struct vmod_directors_shard_param pstk;
640 43
        struct vmod_directors_shard_param *pp = NULL;
641
        const struct vmod_directors_shard_param *ppt;
642
        enum resolve_e resolve;
643 43
        uint32_t args = shard_backend_arg_mask(a);
644
645 43
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
646 43
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
647 43
        assert((args & ~_arg_mask) == 0);
648
649 43
        if (args & arg_resolve)
650 12
                resolve = parse_resolve_e(a->resolve);
651 31
        else if (ctx->method & VCL_MET_TASK_H)
652 1
                resolve = LAZY;
653
        else
654 30
                resolve = NOW;
655
656 43
        switch (resolve) {
657
        case LAZY:
658 12
                if ((args & ~arg_resolve) == 0) {
659 4
                        AN(vshard->dir);
660 4
                        return (vshard->dir);
661
                }
662
663 8
                if ((ctx->method & VCL_MET_TASK_B) == 0) {
664 1
                        VRT_fail(ctx, "shard .backend resolve=LAZY with other "
665
                                 "parameters can only be used in backend "
666
                                 "context");
667 1
                        return (NULL);
668
                }
669
670 7
                assert(ctx->method & VCL_MET_TASK_B);
671
672 7
                pp = shard_param_task(ctx, vshard->shardd,
673 7
                                      vshard->shardd->param);
674 7
                if (pp == NULL)
675 0
                        return (NULL);
676 7
                pp->vcl_name = vshard->shardd->name;
677 7
                break;
678
        case NOW:
679 31
                if (ctx->method & VCL_MET_TASK_H) {
680 1
                        VRT_fail(ctx,
681
                                 "shard .backend resolve=NOW can not be "
682
                                 "used in vcl_init{}/vcl_fini{}");
683 1
                        return (NULL);
684
                }
685 30
                pp = shard_param_stack(&pstk, vshard->shardd->param,
686 30
                                       vshard->shardd->name);
687 30
                break;
688
        default:
689 0
                WRONG("resolve enum");
690
        }
691
692 37
        AN(pp);
693 37
        if (args & arg_param) {
694 2
                ppt = shard_param_blob(a->param);
695 2
                if (ppt == NULL) {
696 1
                        VRT_fail(ctx, "shard .backend param invalid");
697 1
                        return (NULL);
698
                }
699 1
                pp->defaults = ppt;
700
        }
701
702 36
        pp = shard_param_args(ctx, pp, "shard.backend()",
703
                              args & _arg_mask_set,
704
                              a->by, a->key, a->key_blob, a->alt, a->warmup,
705
                              a->rampup, a->healthy);
706 36
        if (pp == NULL)
707 0
                return (NULL);
708
709 36
        if (resolve == LAZY)
710 7
                return (vshard->dir);
711
712 29
        assert(resolve == NOW);
713 29
        shard_param_merge(pp, pp->defaults);
714 29
        return (sharddir_pick_be(ctx, vshard->shardd,
715
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
716
                                 pp->rampup, pp->healthy));
717
}
718
719
static VCL_BOOL v_matchproto_(vdi_healthy)
720 9
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
721
{
722
        struct sharddir *shardd;
723
724 9
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
725 9
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
726 9
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
727 9
        return (sharddir_any_healthy(ctx, shardd, changed));
728
}
729
730
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
731 9
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
732
{
733
        struct sharddir *shardd;
734
        struct vmod_directors_shard_param pstk[1];
735
        const struct vmod_directors_shard_param *pp;
736
737 9
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
738 9
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
739 9
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
740
741 9
        pp = vmod_shard_param_read(ctx, shardd, shardd->param,
742
                                   pstk, "shard_resolve");
743 9
        if (pp == NULL)
744 0
                return (NULL);
745
746 9
        return (sharddir_pick_be(ctx, shardd,
747
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
748
                                 pp->rampup, pp->healthy));
749
}
750
751
VCL_VOID v_matchproto_(td_directors_shard_backend)
752 6
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
753
    VCL_INT i)
754
{
755 6
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
756
757
        (void)ctx;
758 6
        sharddir_debug(vshard->shardd, i & UINT32_MAX);
759 6
}
760
761
/* =============================================================
762
 * shard_param
763
 */
764
765
VCL_VOID v_matchproto_(td_directors_shard_param__init)
766 17
vmod_shard_param__init(VRT_CTX,
767
    struct vmod_directors_shard_param **pp, const char *vcl_name)
768
{
769
        struct vmod_directors_shard_param *p;
770
771
        (void) ctx;
772 17
        AN(pp);
773 17
        AZ(*pp);
774 17
        ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
775 17
        AN(p);
776 17
        p->vcl_name = vcl_name;
777 17
        p->scope = SCOPE_VCL;
778 17
        p->defaults = &shard_param_default;
779
780 17
        *pp = p;
781 17
}
782
783
VCL_VOID v_matchproto_(td_directors_shard_param__fini)
784 8
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
785
{
786
        struct vmod_directors_shard_param *p;
787
788 8
        if (*pp == NULL)
789 0
                return;
790 8
        TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
791 8
        FREE_OBJ(p);
792
}
793
794
/*
795
 * init a stack param struct defaulting to pa with the given name
796
 */
797
static struct vmod_directors_shard_param *
798 189
shard_param_stack(struct vmod_directors_shard_param *p,
799
    const struct vmod_directors_shard_param *pa, const char *who)
800
{
801 189
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
802 189
        assert(pa->scope > _SCOPE_INVALID);
803
804 189
        AN(p);
805 189
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
806 189
        p->vcl_name = who;
807 189
        p->scope = SCOPE_STACK;
808 189
        p->defaults = pa;
809
810 189
        return (p);
811
}
812
/*
813
 * get a task scoped param struct for id defaulting to pa
814
 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
815
 */
816
static struct vmod_directors_shard_param *
817 221
shard_param_task(VRT_CTX, const void *id,
818
   const struct vmod_directors_shard_param *pa)
819
{
820
        struct vmod_directors_shard_param *p;
821
        struct vmod_priv *task;
822
823 221
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
824 221
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
825 221
        assert(pa->scope > _SCOPE_INVALID);
826
827 221
        task = VRT_priv_task(ctx, id);
828
829 221
        if (task == NULL) {
830 0
                VRT_fail(ctx, "no priv_task");
831 0
                return (NULL);
832
        }
833
834 221
        if (task->priv) {
835 126
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
836 126
                assert(p->scope == SCOPE_TASK);
837
                /* XXX
838
                VSL(SLT_Debug, 0,
839
                    "shard_param_task(id %p, pa %p) = %p (found, ws=%p)",
840
                    id, pa, p, ctx->ws);
841
                */
842 126
                return (p);
843
        }
844
845 95
        p = WS_Alloc(ctx->ws, sizeof *p);
846 95
        if (p == NULL) {
847 0
                VRT_fail(ctx, "shard_param_task WS_Alloc failed");
848 0
                return (NULL);
849
        }
850 95
        task->priv = p;
851 95
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
852 95
        p->vcl_name = pa->vcl_name;
853 95
        p->scope = SCOPE_TASK;
854
855 95
        if (id == pa || pa->scope != SCOPE_VCL)
856 89
                p->defaults = pa;
857
        else
858 6
                p->defaults = shard_param_task(ctx, pa, pa);
859
860
        /* XXX
861
        VSL(SLT_Debug, 0,
862
            "shard_param_task(id %p, pa %p) = %p (new, defaults = %p, ws=%p)",
863
            id, pa, p, p->defaults, ctx->ws);
864
        */
865 95
        return (p);
866
}
867
868
static struct vmod_directors_shard_param *
869 95
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
870
    const char *who)
871
{
872 95
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
873 95
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
874
875 95
        if (ctx->method & VCL_MET_TASK_C) {
876 1
                VRT_fail(ctx, "%s may only be used "
877
                         "in vcl_init and in backend context", who);
878 1
                return (NULL);
879 94
        } else if (ctx->method & VCL_MET_TASK_B)
880 79
                p = shard_param_task(ctx, p, p);
881
        else
882 15
                assert(ctx->method & VCL_MET_TASK_H);
883
884 94
        return (p);
885
}
886
887
VCL_VOID v_matchproto_(td_directors_shard_param_set)
888 95
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
889
                     struct vmod_shard_param_set_arg *a)
890
{
891 95
        uint32_t args = shard_param_set_mask(a);
892
893 95
        assert((args & ~_arg_mask_set) == 0);
894
895 95
        p = shard_param_prep(ctx, p, "shard_param.set()");
896 95
        if (p == NULL)
897 1
                return;
898 94
        (void) shard_param_args(ctx, p, "shard_param.set()", args,
899
                                a->by, a->key, a->key_blob, a->alt, a->warmup,
900
                                a->rampup, a->healthy);
901
}
902
903
VCL_VOID v_matchproto_(td_directors_shard_param_clear)
904 0
vmod_shard_param_clear(VRT_CTX,
905
    struct vmod_directors_shard_param *p)
906
{
907 0
        p = shard_param_prep(ctx, p, "shard_param.clear()");
908 0
        if (p == NULL)
909 0
                return;
910 0
        p->mask = 0;
911
}
912
913
/*
 * Resolve the parameters to read: in backend context, or with
 * ctx->method == 0 (LAZY resolution outside a VCL method), read from
 * the task-scoped shadow for id.  The result is merged into the
 * caller-provided stack struct pstk (presumably flattening the
 * defaults chain via shard_param_stack()/shard_param_merge() - their
 * definitions are outside this view) and that copy is returned.
 * Returns NULL if the task-scoped lookup failed.
 */
static const struct vmod_directors_shard_param *
vmod_shard_param_read(VRT_CTX, const void *id,
    const struct vmod_directors_shard_param *p,
    struct vmod_directors_shard_param *pstk, const char *who)
{
	struct vmod_directors_shard_param *pp;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	(void) who; // XXX

	if (ctx->method == 0 || (ctx->method & VCL_MET_TASK_B))
		p = shard_param_task(ctx, id, p);

	if (p == NULL)
		return (NULL);

	pp = shard_param_stack(pstk, p, p->vcl_name);
	AN(pp);
	shard_param_merge(pp, p);
	return (pp);
}
935
936
VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
937 25
vmod_shard_param_get_by(VRT_CTX,
938
    struct vmod_directors_shard_param *p)
939
{
940
        struct vmod_directors_shard_param pstk;
941
        const struct vmod_directors_shard_param *pp;
942
943 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk, "shard_param.get_by()");
944 25
        if (pp == NULL)
945 0
                return (NULL);
946 25
        assert(pp->by > _BY_E_INVALID);
947 25
        return (by_str[pp->by]);
948
}
949
950
VCL_INT v_matchproto_(td_directors_shard_param_get_key)
951 25
vmod_shard_param_get_key(VRT_CTX,
952
    struct vmod_directors_shard_param *p)
953
{
954
        struct vmod_directors_shard_param pstk;
955
        const struct vmod_directors_shard_param *pp;
956
957 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk, "shard_param.get_key()");
958 25
        if (pp == NULL)
959 0
                return (-1);
960 25
        return ((VCL_INT)shard_get_key(ctx, pp));
961
}
962
VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
963 25
vmod_shard_param_get_alt(VRT_CTX,
964
    struct vmod_directors_shard_param *p)
965
{
966
        struct vmod_directors_shard_param pstk;
967
        const struct vmod_directors_shard_param *pp;
968
969 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
970
                                   "shard_param.get_alt()");
971 25
        if (pp == NULL)
972 0
                return (-1);
973 25
        return (pp->alt);
974
}
975
976
VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
977 25
vmod_shard_param_get_warmup(VRT_CTX,
978
    struct vmod_directors_shard_param *p)
979
{
980
        struct vmod_directors_shard_param pstk;
981
        const struct vmod_directors_shard_param *pp;
982
983 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
984
                                   "shard_param.get_warmup()");
985 25
        if (pp == NULL)
986 0
                return (-2);
987 25
        return (pp->warmup);
988
}
989
990
VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
991 25
vmod_shard_param_get_rampup(VRT_CTX,
992
    struct vmod_directors_shard_param *p)
993
{
994
        struct vmod_directors_shard_param pstk;
995
        const struct vmod_directors_shard_param *pp;
996
997 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
998
                                   "shard_param.get_rampup()");
999 25
        if (pp == NULL)
1000 0
                return (0);
1001 25
        return (pp->rampup);
1002
}
1003
1004
VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
1005 25
vmod_shard_param_get_healthy(VRT_CTX,
1006
    struct vmod_directors_shard_param *p)
1007
{
1008
        struct vmod_directors_shard_param pstk;
1009
        const struct vmod_directors_shard_param *pp;
1010
1011 25
        pp = vmod_shard_param_read(ctx, p, p, &pstk,
1012
                                   "shard_param.get_healthy()");
1013 25
        if (pp == NULL)
1014 0
                return (NULL);
1015 25
        assert(pp->healthy > _HEALTHY_E_INVALID);
1016 25
        return (healthy_str[pp->healthy]);
1017
1018
}
1019
1020
/*
 * Interpret a VCL_BLOB as a shard parameter struct, as handed out by
 * shard_param.use(): accept it only if size and the leading magic
 * member match.
 *
 * NOTE(review): the magic probe reads blob->priv through unsigned *.
 * For blobs created by vmod_shard_param_use() that is a real, aligned
 * param struct; a foreign blob of the right length could in principle
 * be misaligned for this read - confirm only shard blobs reach here.
 */
static const struct vmod_directors_shard_param *
shard_param_blob(const VCL_BLOB blob)
{
	if (blob && blob->priv &&
	    blob->len == sizeof(struct vmod_directors_shard_param) &&
	    *(unsigned *)blob->priv == VMOD_SHARD_SHARD_PARAM_MAGIC)
		return (blob->priv);
	return (NULL);
}
1029
1030
VCL_BLOB v_matchproto_(td_directors_shard_param_use)
1031 3
vmod_shard_param_use(VRT_CTX,
1032
    struct vmod_directors_shard_param *p)
1033
{
1034
        struct vmod_priv *blob;
1035
1036 3
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
1037 3
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
1038
1039 3
        blob = (void *)WS_Alloc(ctx->ws, sizeof *blob);
1040 3
        if (blob == NULL) {
1041 0
                VRT_fail(ctx, "Workspace overflow (param.use())");
1042 0
                return (NULL);
1043
        }
1044
1045 3
        memset(blob, 0, sizeof *blob);
1046 3
        blob->len = sizeof *p;
1047 3
        blob->priv = p;
1048
1049 3
        return (blob);
1050
}