varnish-cache/vmod/vmod_directors_shard.c
1
/*-
2
 * Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
3
 * All rights reserved.
4
 *
5
 * Authors: Julian Wiesener <jw@uplex.de>
6
 *          Nils Goroll <slink@uplex.de>
7
 *
8
 * SPDX-License-Identifier: BSD-2-Clause
9
 *
10
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
12
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 */
31
32
#include "config.h"
33
34
#include <stdlib.h>
35
#include <string.h>
36
37
#include "cache/cache.h"
38
#include "vcl.h"
39
40
#include "vend.h"
41
42
#include "vcc_directors_if.h"
43
#include "vmod_directors_shard_dir.h"
44
#include "vmod_directors_shard_cfg.h"
45
#include "vsb.h"
46
47
/* -------------------------------------------------------------------------
48
 *  shard director: LAZY mode (vdi resolve function), parameter objects
49
 *
50
 *  By associating a parameter object with a shard director, we enable LAZY
51
 *  lookups as with the other directors. Parameter objects are defined with VCL
52
 *  scope (normal vmod objects), but can be overridden per backend request using
53
 *  a task priv.
54
 *
55
 *  We use the same concept to carry shard.backend() parameters to vdi resolve
56
 *  for LAZY mode: They get saved in a per-director task scope parameter object.
57
 *
58
 *  Each object points to another object providing defaults for values which are
59
 *  not defined.
60
 *
61
 *  Actual resolution of the various parameter objects does not happen before
62
 *  they are used, which enables changing them independently (ie, shard
63
 *  .backend() parameters have precedence over an associated parameter object,
64
 *  which by itself can be overridden).
65
 *
66
 *  Overview of parameter objects (pointers are alternatives)
67
 *
68
 *  shard() director        shard_param() object    default param
69
 *
70
 *               --------------------------------->   vmod static
71
 *    VCL obj   /                                ->
72
 *    .param  -+--------->    VCL obj           /  _
73
 *                            .default  --------   /|
74
 *                                                /
75
 *                               ^               /
76
 *                               |              /
77
 *                                             /
78
 *                            .default        /
79
 *          ------------->    TASK priv      /
80
 *         /                                /
81
 *    .default -----------------------------
82
 *    TASK priv
83
 */
84
85
/* -------------------------------------------------------------------------
86
 * method arguments and set parameters bitmask in vmod_directors_shard_param
87
 */
88
89
#define arg_by          ((uint32_t)1)
90
#define arg_key         ((uint32_t)1 << 1)
91
#define arg_key_blob    ((uint32_t)1 << 2)
92
#define arg_alt         ((uint32_t)1 << 3)
93
#define arg_warmup      ((uint32_t)1 << 4)
94
#define arg_rampup      ((uint32_t)1 << 5)
95
#define arg_healthy     ((uint32_t)1 << 6)
96
#define arg_param       ((uint32_t)1 << 7)
97
#define arg_resolve     ((uint32_t)1 << 8)
98
#define arg_mask_       ((arg_resolve << 1) - 1)
99
/* allowed in shard_param.set */
100
#define arg_mask_set_   (arg_param - 1)
101
/* allowed in shard_param */
102
#define arg_mask_param_ ( arg_mask_set_         \
103
                          & ~arg_key                    \
104
                          & ~arg_key_blob )
105
106
/* -------------------------------------------------------------------------
107
 * shard parameters - declaration & defaults
108
 */
109
/* Lifetime/ownership scope of a parameter object; checked via assertions
 * and used to decide whether task-scoped copies must be made. */
enum vmod_directors_shard_param_scope {
        _SCOPE_INVALID = 0,     /* catches uninitialized scope fields */
        SCOPE_VMOD,             /* built-in defaults (shard_param_default) */
        SCOPE_VCL,              /* VCL-level shard_param object */
        SCOPE_TASK,             /* per-task (request/busyobj) override */
        SCOPE_STACK             /* temporary, lives on the C stack */
};
116
117
struct vmod_directors_shard_param;

/* marker for a VCL_BLOB carrying a serialized shard parameter object */
#define VMOD_SHARD_SHARD_PARAM_BLOB             0xdf5ca116

/*
 * One set of shard.backend() parameters. Undefined values are resolved
 * lazily by following the ->defaults chain (see shard_param_merge()).
 */
struct vmod_directors_shard_param {
        unsigned                                magic;
#define VMOD_SHARD_SHARD_PARAM_MAGIC            0xdf5ca117

        /* internals */
        uint32_t                                key;       /* resolved key (by=KEY/BLOB) */
        const char                              *vcl_name; /* owner name for diagnostics */
        const struct vmod_directors_shard_param *defaults; /* fallback chain, NULL at bottom */
        enum vmod_directors_shard_param_scope   scope;

        /* parameters */
        VCL_ENUM                                by;        /* key derivation method */
        VCL_ENUM                                healthy;
        uint32_t                                mask;      /* arg_* bits actually set */
        VCL_BOOL                                rampup;
        VCL_INT                                 alt;
        VCL_REAL                                warmup;
};
139
140
/* Built-in defaults: the bottom of every ->defaults chain (SCOPE_VMOD).
 * mask == arg_mask_param_ means every parameter is considered defined,
 * terminating the recursive merge. */
static const struct vmod_directors_shard_param shard_param_default = {
        .magic          = VMOD_SHARD_SHARD_PARAM_MAGIC,

        .key            = 0,
        .vcl_name       = "builtin defaults",
        .defaults       = NULL,
        .scope          = SCOPE_VMOD,

        .mask           = arg_mask_param_,
        .rampup = 1,
        .alt            = 0,
        .warmup         = -1,   /* -1: use the director-level warmup setting */
};
153
154
#define default_by(ptr) (ptr == NULL ? VENUM(HASH) : ptr)
155
#define default_healthy(ptr) (ptr == NULL ? VENUM(CHOSEN) : ptr)
156
157
static struct vmod_directors_shard_param *
158
shard_param_stack(struct vmod_directors_shard_param *p,
159
    const struct vmod_directors_shard_param *pa, const char *who);
160
161
static const struct vmod_directors_shard_param *
162
shard_param_task_r(VRT_CTX, const void *id, const char *who,
163
    const struct vmod_directors_shard_param *pa);
164
165
static struct vmod_directors_shard_param *
166
shard_param_task_l(VRT_CTX, const void *id, const char *who,
167
    const struct vmod_directors_shard_param *pa);
168
169
static const struct vmod_directors_shard_param *
170
shard_param_blob(VCL_BLOB blob);
171
172
static const struct vmod_directors_shard_param *
173
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
174
    const struct vmod_directors_shard_param *p,
175
    struct vmod_directors_shard_param *pstk);
176
177
// XXX #3329 #3330 revisit - for now, treat pipe like backend
178
#define SHARD_VCL_TASK_REQ (VCL_MET_TASK_C & ~VCL_MET_PIPE)
179
#define SHARD_VCL_TASK_BEREQ (VCL_MET_TASK_B | VCL_MET_PIPE)
180
/* -------------------------------------------------------------------------
181
 * shard vmod interface
182
 */
183
static vdi_healthy_f vmod_shard_healthy;
184
static vdi_resolve_f vmod_shard_resolve;
185
static vdi_list_f vmod_shard_list;
186
187
/* Per-VCL shard director object: couples the shared sharddir state with
 * the director handle registered via VRT_AddDirector(). */
struct vmod_directors_shard {
        unsigned                                magic;
#define VMOD_SHARD_SHARD_MAGIC                  0x6e63e1bf
        struct sharddir                         *shardd;
        VCL_BACKEND                             dir;
};
193
194
static void
195 550
shard__assert(void)
196
{
197
        VCL_INT t1;
198
        uint32_t t2a, t2b;
199
200
        /* we put our uint32 key in a VCL_INT container */
201 550
        assert(sizeof(VCL_INT) >= sizeof(uint32_t));
202 550
        t2a = UINT32_MAX;
203 550
        t1 = (VCL_INT)t2a;
204 550
        t2b = (uint32_t)t1;
205 550
        assert(t2a == t2b);
206 550
}
207
208
/* vdi destroy callback: tear down the sharddir attached to the director. */
static void v_matchproto_(vdi_destroy_f)
vmod_shard_destroy(VCL_BACKEND dir)
{
        struct sharddir *shardd;

        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        sharddir_delete(&shardd);
}
216
217
/* director method table registered with VRT_AddDirector() in __init */
static const struct vdi_methods vmod_shard_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "shard",
        .resolve =      vmod_shard_resolve,
        .healthy =      vmod_shard_healthy,
        .destroy =      vmod_shard_destroy,
        .list =         vmod_shard_list
}};
225
226
227
/*
 * VCL object constructor: allocate the wrapper, create the sharddir with
 * built-in default parameters and register the director with the VRT.
 */
VCL_VOID v_matchproto_(td_directors_shard__init)
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
    const char *vcl_name)
{
        struct vmod_directors_shard *vshard;

        /* verify uint32 key <-> VCL_INT round-trip once per object */
        shard__assert();

        AN(vshardp);
        AZ(*vshardp);
        ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
        AN(vshard);

        *vshardp = vshard;
        sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);

        vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
            "%s", vcl_name);
}
246
247
/* VCL object destructor: unregister the director (which triggers
 * vmod_shard_destroy for the sharddir) and free the wrapper. */
VCL_VOID v_matchproto_(td_directors_shard__fini)
vmod_shard__fini(struct vmod_directors_shard **vshardp)
{
        struct vmod_directors_shard *vshard;

        TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
        VRT_DelDirector(&vshard->dir);
        FREE_OBJ(vshard);
}
256
257
/* .key(): hash arbitrary strands into a 32-bit shard key; neither the
 * context nor the director instance is consulted. */
VCL_INT v_matchproto_(td_directors_shard_key)
vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, VCL_STRANDS s)
{

        (void)ctx;
        (void)vshard;

        return ((VCL_INT)VRT_HashStrands32(s));
}
266
267
/* .set_warmup(): set the default warmup probability. Values outside
 * [0, 1) are rejected with a notice and leave the setting unchanged. */
VCL_VOID v_matchproto_(td_directors_set_warmup)
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_REAL probability)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        if (probability < 0 || probability >= 1) {
                shard_notice(ctx->vsl, vshard->shardd->name,
                    ".set_warmup(%f) ignored", probability);
                return;
        }
        shardcfg_set_warmup(vshard->shardd, probability);
}
279
280
/* .set_rampup(): set the default rampup duration for newly added backends. */
VCL_VOID v_matchproto_(td_directors_set_rampup)
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_DURATION duration)
{
        (void)ctx;
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardcfg_set_rampup(vshard->shardd, duration);
}
288
289
/*
 * .associate(): attach a shard_param object (passed as its blob
 * serialization) as this director's default parameter set.
 * A NULL blob restores the built-in defaults; an invalid blob fails
 * the VCL transaction and leaves the association unchanged.
 */
VCL_VOID v_matchproto_(td_directors_shard_associate)
vmod_shard_associate(VRT_CTX,
    struct vmod_directors_shard *vshard, VCL_BLOB b)
{
        const struct vmod_directors_shard_param *ppt;
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (b == NULL) {
                sharddir_set_param(vshard->shardd, &shard_param_default);
                return;
        }

        ppt = shard_param_blob(b);

        if (ppt == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    "shard .associate param invalid");
                return;
        }

        sharddir_set_param(vshard->shardd, ppt);
}
311
312
/*
 * .add_backend(): add a backend to the shard configuration.
 * Fails the VCL transaction for a NULL backend. Weights below 1 are
 * ignored with a notice (weight stays 1); a missing rampup argument is
 * passed down as NaN so shardcfg uses the director default.
 * Returns the result of shardcfg_add_backend(), 0 on failure.
 */
VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_add_backend) *args)
{
        VCL_REAL weight = 1;

        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (args->backend == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    "None backend cannot be added");
                return (0);
        }

        if (args->valid_weight) {
                if (args->weight >= 1)
                        weight = args->weight;
                else
                        shard_notice(ctx->vsl, vshard->shardd->name,
                            ".add_backend(weight=%f) ignored", args->weight);
        }

        return (shardcfg_add_backend(ctx, vshard->shardd, args->backend,
            args->valid_ident ? args->ident : NULL,
            args->valid_rampup ? args->rampup : nan(""),
            weight));
}
339
340
/*
 * .remove_backend(): remove backend(s) by backend reference and/or ident.
 * At least one of the two selectors must be given, otherwise the VCL
 * transaction is failed and 0 returned.
 */
VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_remove_backend) *args)
{
        VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
        VCL_STRING ident = args->valid_ident ? args->ident : NULL;

        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (be == NULL && ident == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    ".remove_backend(): either backend or ident are required");
                return (0);
        }

        return (shardcfg_remove_backend(ctx, vshard->shardd, be, ident));
}
357
358
/* .clear(): remove all backends from the shard configuration. */
VCL_BOOL v_matchproto_(td_directors_shard_clear)
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        return (shardcfg_clear(ctx, vshard->shardd));
}
364
365
VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
366 924
vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
367
    VCL_INT replicas)
368
{
369 924
        return (shardcfg_reconfigure(ctx, vshard->shardd, replicas));
370
}
371
372
/*
 * Derive the 32-bit shard key from a fully merged parameter set:
 *  - by=KEY/BLOB: key was already computed, use it as-is
 *  - by=HASH in backend context: reuse the busyobj's request digest
 *  - by=HASH (client, no digest) or by=URL: hash the request URL
 */
static inline uint32_t
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
{
        struct http *http;
        struct strands s[1];
        const char *sp[1];
        VCL_ENUM by = default_by(p->by);

        if (by == VENUM(KEY) || by == VENUM(BLOB))
                return (p->key);
        if (by == VENUM(HASH) && ctx->bo != NULL) {
                CHECK_OBJ(ctx->bo, BUSYOBJ_MAGIC);
                return (vbe32dec(ctx->bo->digest));
        }
        if (by == VENUM(HASH) || by == VENUM(URL)) {
                /* pick whichever http object this context provides */
                if (ctx->http_req) {
                        AN(http = ctx->http_req);
                } else {
                        AN(ctx->http_bereq);
                        AN(http = ctx->http_bereq);
                }
                sp[0] = http->hd[HTTP_HDR_URL].b;
                s->n = 1;
                s->p = sp;
                return (VRT_HashStrands32(s));
        }
        WRONG("by enum");
}
400
401
/*
402
 * merge parameters to resolve all undef values
403
 * key is to be calculated after merging
404
 */
405
/*
 * Fill every still-undefined field of *to from *from, then recurse down
 * from->defaults until all arg_mask_param_ bits of to->mask are set.
 * A field is copied only if unset in *to and set in *from; the key is
 * copied together with 'by' when by is KEY or BLOB (otherwise the key is
 * derived later, see shard_get_key()).
 */
static void
shard_param_merge(struct vmod_directors_shard_param *to,
                  const struct vmod_directors_shard_param *from)
{
        CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((to->mask & ~arg_mask_param_) == 0);

        /* already complete - nothing to merge */
        if (to->mask == arg_mask_param_)
                return;

        CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((from->mask & ~arg_mask_param_) == 0);

        if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
                to->by = from->by;
                if (from->by == VENUM(KEY) || from->by == VENUM(BLOB))
                        to->key = from->key;
        }

/* copy field iff unset in to and set in from */
#define mrg(to, from, field) do {                                       \
                if (((to)->mask & arg_ ## field) == 0 &&                \
                    ((from)->mask & arg_ ## field) != 0)                \
                        (to)->field = (from)->field;                    \
        } while(0)

        mrg(to, from, healthy);
        mrg(to, from, rampup);
        mrg(to, from, alt);
        mrg(to, from, warmup);
#undef mrg

        to->mask |= from->mask;

        if (to->mask == arg_mask_param_)
                return;

        /* still incomplete: continue down the defaults chain, which must
         * terminate in shard_param_default (full mask) */
        AN(from->defaults);
        shard_param_merge(to, from->defaults);
}
444
445
static uint32_t
446 220
shard_blob_key(VCL_BLOB key_blob)
447
{
448 220
        uint8_t k[4] = { 0 };
449
        const uint8_t *b;
450
        size_t i, ki;
451
452 220
        AN(key_blob);
453 220
        AN(key_blob->blob);
454 220
        assert(key_blob->len > 0);
455
456 220
        if (key_blob->len >= 4)
457 220
                ki = 0;
458
        else
459 0
                ki = 4 - key_blob->len;
460
461 220
        b = key_blob->blob;
462 1100
        for (i = 0; ki < 4; i++, ki++)
463 880
                k[ki] = b[i];
464 220
        assert(i <= key_blob->len);
465
466 220
        return (vbe32dec(k));
467
}
468
469
/*
470
 * convert vmod interface valid_* to our bitmask
471
 */
472
473
#define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)
474
475
/* fold the valid_* flags of a shard.backend() call into an arg_* bitmask */
static uint32_t
shard_backendarg_mask_(const struct VARGS(shard_backend) * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy)       |
                tobit(a, param)         |
                tobit(a, resolve));
}
488
/* fold the valid_* flags of a shard_param.set() call into an arg_* bitmask
 * (no param/resolve - cf. arg_mask_set_) */
static uint32_t
shard_param_set_mask(const struct VARGS(shard_param_set) * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy));
}
499
#undef tobit
500
501
/*
502
 * check arguments and return in a struct param
503
 */
504
/*
 * Validate the raw method arguments selected by 'args' and store them
 * into *p. On any invalid combination the VCL transaction is failed via
 * shard_fail() and NULL is returned; otherwise p is returned with
 * p->mask reflecting which parameters are now set.
 */
static struct vmod_directors_shard_param *
shard_param_args(VRT_CTX,
    struct vmod_directors_shard_param *p, const char *func,
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
{

        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p->vcl_name);

        assert((args & ~arg_mask_set_) == 0);

        if (!(args & arg_by))
                by_s = NULL;
        by_s = default_by(by_s);

        /* by_s / key_int / key_blob */
        if (by_s == VENUM(KEY)) {
                /* by=KEY requires a key in uint32 range */
                if ((args & arg_key) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                if (key_int < 0 || key_int > UINT32_MAX) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid key argument %jd with by=%s",
                            func, (intmax_t)key_int, by_s);
                        return (NULL);
                }
                assert(key_int >= 0);
                assert(key_int <= UINT32_MAX);
                p->key = (uint32_t)key_int;
        } else if (by_s == VENUM(BLOB)) {
                /* by=BLOB requires key_blob; an empty blob only warns
                 * and falls back to key 0 */
                if ((args & arg_key_blob) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key_blob argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                if (key_blob == NULL || key_blob->len == 0 ||
                    key_blob->blob == NULL) {
                        shard_err(ctx->vsl, p->vcl_name,
                            "%s by=BLOB but no or empty key_blob - using key 0",
                            func);
                        p->key = 0;
                } else
                        p->key = shard_blob_key(key_blob);
        } else if (by_s == VENUM(HASH) || by_s == VENUM(URL)) {
                /* derived keys: explicit key arguments are contradictory */
                if (args & (arg_key|arg_key_blob)) {
                        shard_fail(ctx, p->vcl_name,
                            "%s key and key_blob arguments are "
                            "invalid with by=%s", func, by_s);
                        return (NULL);
                }
        } else {
                WRONG("by enum");
        }
        p->by = by_s;

        if (args & arg_alt) {
                if (alt < 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid alt argument %jd",
                            func, (intmax_t)alt);
                        return (NULL);
                }
                p->alt = alt;
        }

        if (args & arg_warmup) {
                /* valid warmup: [0, 1] or the magic -1 (use default) */
                if ((warmup < 0 && warmup != -1) || warmup > 1) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid warmup argument %f",
                            func, warmup);
                        return (NULL);
                }
                p->warmup = warmup;
        }

        if (args & arg_rampup)
                p->rampup = !!rampup;

        if (args & arg_healthy)
                p->healthy = healthy_s;

        p->mask = args & arg_mask_param_;
        return (p);
}
593
594
/*
 * .backend(): pick a backend.
 *
 * resolve=NOW merges parameters on the stack and picks immediately;
 * resolve=LAZY stores the arguments in a task-scoped parameter object
 * and returns the director itself so resolution happens at vdi resolve
 * time. Without an explicit resolve argument, vcl_init/vcl_fini imply
 * LAZY, all other methods imply NOW.
 */
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
                   struct VARGS(shard_backend) *a)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk;
        struct vmod_directors_shard_param *pp = NULL;
        const struct vmod_directors_shard_param *ppt;
        VCL_ENUM resolve;
        uint32_t args = shard_backendarg_mask_(a);

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardd = vshard->shardd;
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        assert((args & ~arg_mask_) == 0);

        if (args & arg_resolve)
                resolve = a->resolve;
        else if (ctx->method & VCL_MET_TASK_H)
                resolve = VENUM(LAZY);
        else
                resolve = VENUM(NOW);

        if (resolve == VENUM(LAZY)) {
                /* plain LAZY without further arguments: nothing to save */
                if ((args & ~arg_resolve) == 0) {
                        AN(vshard->dir);
                        return (vshard->dir);
                }

                /* extra parameters need task storage, which only exists
                 * in backend/pipe context */
                if ((ctx->method & SHARD_VCL_TASK_BEREQ) == 0) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=LAZY) with other "
                            "parameters can only be used in backend/pipe "
                            "context");
                        return (NULL);
                }

                pp = shard_param_task_l(ctx, shardd, shardd->name,
                    shardd->param);
                if (pp == NULL)
                        return (NULL);
        } else if (resolve == VENUM(NOW)) {
                if (ctx->method & VCL_MET_TASK_H) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=NOW) can not be "
                            "used in vcl_init{}/vcl_fini{}");
                        return (NULL);
                }
                /* immediate pick: read current defaults, work on stack */
                ppt = shard_param_task_r(ctx, shardd, shardd->name,
                    shardd->param);
                AN(ppt);
                pp = shard_param_stack(&pstk, ppt, shardd->name);
        } else {
                WRONG("resolve enum");
        }

        AN(pp);

        if (args & arg_param) {
                /* explicit param object overrides the defaults chain */
                ppt = shard_param_blob(a->param);
                if (ppt == NULL) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(key_blob) param invalid");
                        return (NULL);
                }
                pp->defaults = ppt;
        }

        pp = shard_param_args(ctx, pp, "shard.backend()",
                              args & arg_mask_set_,
                              a->by, a->key, a->key_blob, a->alt, a->warmup,
                              a->rampup, a->healthy);
        if (pp == NULL)
                return (NULL);

        if (resolve == VENUM(LAZY))
                return (vshard->dir);

        assert(resolve == VENUM(NOW));
        shard_param_merge(pp, pp->defaults);
        return (sharddir_pick_be(ctx, shardd, shard_get_key(ctx, pp),
            pp->alt, pp->warmup, pp->rampup, pp->healthy));
}
678
679
/* vdi healthy callback: the director is healthy if any backend is. */
static VCL_BOOL v_matchproto_(vdi_healthy)
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
{
        struct sharddir *shardd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        return (sharddir_any_healthy(ctx, shardd, changed));
}
689
690
/* vdi resolve callback (LAZY mode): read the effective parameter set
 * (task override or director default) and pick a backend now. */
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk[1];
        const struct vmod_directors_shard_param *pp;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        pp = vmod_shard_param_read(ctx, shardd, shardd->name,
            shardd->param, pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);

        return (sharddir_pick_be(ctx, shardd,
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
                                 pp->rampup, pp->healthy));
}
709
710
/*
 * vdi list callback (backend.list CLI / panic output).
 * pflag: emit the detailed per-backend listing; jflag: JSON format.
 * Without pflag only the summary "healthy/total state" line is emitted.
 * Health and rampup are computed per backend; rampup remaining is the
 * time left until the backend reaches full weight.
 */
static void v_matchproto_(vdi_list_f)
vmod_shard_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag, int jflag)
{
        struct sharddir *shardd;
        struct shard_backend *sbe;
        VCL_TIME c, changed = 0;
        VCL_DURATION rampup_d, d;
        VCL_BACKEND be;
        VCL_BOOL h;
        unsigned i, nh = 0;
        double rampup_p;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        if (pflag) {
                if (jflag) {
                        VSB_cat(vsb, "{\n");
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"warmup\": %f,\n", shardd->warmup);
                        VSB_printf(vsb, "\"rampup_duration\": %f,\n",
                            shardd->rampup_duration);
                        VSB_cat(vsb, "\"backends\": {\n");
                        VSB_indent(vsb, 2);
                } else {
                        VSB_cat(vsb, "\n\n\tBackend\tIdent\tHealth\t"
                            "Rampup  Remaining\n");
                }
        }

        sharddir_rdlock(shardd);
        for (i = 0; i < shardd->n_backend; i++) {
                sbe = &shardd->backend[i];
                AN(sbe);
                be = sbe->backend;
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

                c = 0;
                h = VRT_Healthy(ctx, be, &c);
                if (h)
                        nh++;
                if (c > changed)
                        changed = c;
                /* summary-only mode: just count health */
                if ((pflag) == 0)
                        continue;

                /* time since the backend's last health change drives
                 * the rampup fraction */
                d = ctx->now - c;
                rampup_d = shardcfg_get_rampup(shardd, i);
                if (! h) {
                        rampup_p = 0.0;
                        rampup_d = 0.0;
                } else if (d < rampup_d) {
                        rampup_p = d / rampup_d;
                        rampup_d -= d;
                } else {
                        rampup_p = 1.0;
                        rampup_d = 0.0;
                }

                if (jflag) {
                        if (i)
                                VSB_cat(vsb, ",\n");
                        VSB_printf(vsb, "\"%s\": {\n",
                            be->vcl_name);
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"ident\": \"%s\",\n",
                            sbe->ident ? sbe->ident : be->vcl_name);
                        VSB_printf(vsb, "\"health\": \"%s\",\n",
                            h ? "healthy" : "sick");
                        VSB_printf(vsb, "\"rampup\": %f,\n", rampup_p);
                        VSB_printf(vsb, "\"rampup_remaining\": %.3f\n",
                            rampup_d);
                        VSB_indent(vsb, -2);
                        VSB_cat(vsb, "}");
                } else {
                        VSB_printf(vsb, "\t%s\t%s\t%s\t%6.2f%% %8.3fs\n",
                            be->vcl_name,
                            sbe->ident ? sbe->ident : be->vcl_name,
                            h ? "healthy" : "sick",
                            rampup_p * 100, rampup_d);
                }
        }
        sharddir_unlock(shardd);

        if (jflag && (pflag)) {
                VSB_cat(vsb, "\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "}\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "},\n");
        }

        if (pflag)
                return;

        if (jflag)
                VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, i,
                    nh ? "healthy" : "sick");
        else
                VSB_printf(vsb, "%u/%u\t%s", nh, i, nh ? "healthy" : "sick");
}
812
813
/* .debug(): set the sharddir debug flags (low 32 bits of i).
 * NOTE(review): the matchproto names td_directors_shard_backend; for
 * .debug() one would expect td_directors_shard_debug - verify against
 * the generated vcc_directors_if.h. */
VCL_VOID v_matchproto_(td_directors_shard_backend)
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_INT i)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        (void)ctx;
        sharddir_debug(vshard->shardd, i & UINT32_MAX);
}
822
823
/* =============================================================
824
 * shard_param
825
 */
826
827
/* shard_param VCL object constructor: allocate an empty VCL-scope
 * parameter set defaulting to the built-in defaults. */
VCL_VOID v_matchproto_(td_directors_shard_param__init)
vmod_shard_param__init(VRT_CTX,
    struct vmod_directors_shard_param **pp, const char *vcl_name)
{
        struct vmod_directors_shard_param *p;

        (void) ctx;
        AN(pp);
        AZ(*pp);
        ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p);
        p->vcl_name = vcl_name;
        p->scope = SCOPE_VCL;
        p->defaults = &shard_param_default;

        *pp = p;
}
844
845
/* shard_param VCL object destructor. */
VCL_VOID v_matchproto_(td_directors_shard_param__fini)
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
{
        struct vmod_directors_shard_param *p;

        TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        FREE_OBJ(p);
}
853
854
/*
855
 * init a stack param struct defaulting to pa with the given name
856
 */
857
static struct vmod_directors_shard_param *
858 5610
shard_param_stack(struct vmod_directors_shard_param *p,
859
    const struct vmod_directors_shard_param *pa, const char *who)
860
{
861 5610
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
862 5610
        assert(pa->scope > _SCOPE_INVALID);
863
864 5610
        AN(p);
865 5610
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
866 5610
        p->vcl_name = who;
867 5610
        p->scope = SCOPE_STACK;
868 5610
        p->defaults = pa;
869
870 5610
        return (p);
871
}
872
873
static const struct vmod_directors_shard_param *
874 5698
shard_param_task_r(VRT_CTX, const void *id, const char *who,
875
   const struct vmod_directors_shard_param *pa)
876
{
877
        const struct vmod_directors_shard_param *p;
878
        const struct vmod_priv *task;
879
        const void *task_id;
880
881 5698
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
882 5698
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
883 5698
        assert(pa->scope > _SCOPE_INVALID);
884
885 5698
        task_id = (const char *)id + task_off_param;
886 5698
        task = VRT_priv_task_get(ctx, task_id);
887
888 5698
        if (task) {
889 2992
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
890 2992
                assert(p->scope == SCOPE_TASK);
891 2992
                assert(who == p->vcl_name);
892 2992
                return (p);
893
        }
894
895 2706
        if (id == pa || pa->scope != SCOPE_VCL)
896 1958
                return (pa);
897
898 748
        return (shard_param_task_r(ctx, pa, pa->vcl_name, pa));
899 5698
}
/*
 * get a task scoped param struct for id defaulting to pa
 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
 */
static struct vmod_directors_shard_param *
shard_param_task_l(VRT_CTX, const void *id, const char *who,
   const struct vmod_directors_shard_param *pa)
{
	struct vmod_directors_shard_param *p;
	struct vmod_priv *task;
	const void *task_id;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert(pa->scope > _SCOPE_INVALID);

	/* task-private storage is keyed on id offset by task_off_param */
	task_id = (const char *)id + task_off_param;
	task = VRT_priv_task(ctx, task_id);

	if (task == NULL) {
		shard_fail(ctx, who, "%s", "no priv_task");
		return (NULL);
	}

	if (task->priv) {
		/* already created earlier in this task - reuse it */
		CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
		assert(p->scope == SCOPE_TASK);
		assert(who == p->vcl_name);
		return (p);
	}

	/* first use in this task: allocate on the task workspace */
	p = WS_Alloc(ctx->ws, sizeof *p);
	if (p == NULL) {
		shard_fail(ctx, who, "%s", "WS_Alloc failed");
		return (NULL);
	}
	task->priv = p;
	INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	p->vcl_name = who;
	p->scope = SCOPE_TASK;

	if (id == pa || pa->scope != SCOPE_VCL)
		p->defaults = pa;
	else
		/* also materialize a task-scoped struct for the VCL default */
		p->defaults = shard_param_task_l(ctx, pa, pa->vcl_name, pa);

	if (p->defaults == NULL)
		return (NULL);

	return (p);
}
static struct vmod_directors_shard_param *
954 2288
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
955
    const char *who)
956
{
957 2288
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
958 2288
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
959
960 2288
        if (ctx->method & SHARD_VCL_TASK_REQ) {
961 22
                shard_fail(ctx, p->vcl_name, "%s may only be used "
962
                    "in vcl_init and in backend/pipe context", who);
963 22
                return (NULL);
964 2266
        } else if (ctx->method & SHARD_VCL_TASK_BEREQ)
965 1936
                p = shard_param_task_l(ctx, p, p->vcl_name, p);
966
        else
967 330
                assert(ctx->method & VCL_MET_TASK_H);
968
969 2266
        return (p);
970 2288
}
971
972
VCL_VOID v_matchproto_(td_directors_shard_param_set)
973 2178
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
974
                     struct VARGS(shard_param_set) *a)
975
{
976 2178
        uint32_t args = shard_param_set_mask(a);
977
978 2178
        assert((args & ~arg_mask_set_) == 0);
979
980 2178
        p = shard_param_prep(ctx, p, "shard_param.set()");
981 2178
        if (p == NULL)
982 22
                return;
983 4312
        (void) shard_param_args(ctx, p, "shard_param.set()", args,
984 2156
                                a->by, a->key, a->key_blob, a->alt, a->warmup,
985 2156
                                a->rampup, a->healthy);
986 2178
}
987
988
VCL_VOID v_matchproto_(td_directors_shard_param_clear)
989 110
vmod_shard_param_clear(VRT_CTX,
990
    struct vmod_directors_shard_param *p)
991
{
992 110
        p = shard_param_prep(ctx, p, "shard_param.clear()");
993 110
        if (p == NULL)
994 0
                return;
995 110
        p->mask = 0;
996 110
}
997
998
static const struct vmod_directors_shard_param *
999 3630
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
1000
    const struct vmod_directors_shard_param *p,
1001
    struct vmod_directors_shard_param *pstk)
1002
{
1003
        struct vmod_directors_shard_param *pp;
1004
1005 3630
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
1006 3630
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
1007
1008 3630
        if (ctx->method == 0 || (ctx->method & SHARD_VCL_TASK_BEREQ))
1009 2970
                p = shard_param_task_r(ctx, id, who, p);
1010
1011 3630
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
1012 3630
        pp = shard_param_stack(pstk, p, p->vcl_name);
1013 3630
        shard_param_merge(pp, p);
1014 3630
        return (pp);
1015
}
1016
1017
VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
1018 550
vmod_shard_param_get_by(VRT_CTX,
1019
    struct vmod_directors_shard_param *p)
1020
{
1021
        struct vmod_directors_shard_param pstk;
1022
        const struct vmod_directors_shard_param *pp;
1023
1024 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1025 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1026 550
        return (default_by(pp->by));
1027
}
1028
1029
VCL_INT v_matchproto_(td_directors_shard_param_get_key)
1030 550
vmod_shard_param_get_key(VRT_CTX,
1031
    struct vmod_directors_shard_param *p)
1032
{
1033
        struct vmod_directors_shard_param pstk;
1034
        const struct vmod_directors_shard_param *pp;
1035
1036 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1037 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1038 550
        return ((VCL_INT)shard_get_key(ctx, pp));
1039
}
1040
VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
1041 550
vmod_shard_param_get_alt(VRT_CTX,
1042
    struct vmod_directors_shard_param *p)
1043
{
1044
        struct vmod_directors_shard_param pstk;
1045
        const struct vmod_directors_shard_param *pp;
1046
1047 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1048 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1049 550
        return (pp->alt);
1050
}
1051
1052
VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
1053 550
vmod_shard_param_get_warmup(VRT_CTX,
1054
    struct vmod_directors_shard_param *p)
1055
{
1056
        struct vmod_directors_shard_param pstk;
1057
        const struct vmod_directors_shard_param *pp;
1058
1059 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1060 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1061 550
        return (pp->warmup);
1062
}
1063
1064
VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
1065 550
vmod_shard_param_get_rampup(VRT_CTX,
1066
    struct vmod_directors_shard_param *p)
1067
{
1068
        struct vmod_directors_shard_param pstk;
1069
        const struct vmod_directors_shard_param *pp;
1070
1071 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1072 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1073 550
        return (pp->rampup);
1074
}
1075
1076
VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
1077 550
vmod_shard_param_get_healthy(VRT_CTX,
1078
    struct vmod_directors_shard_param *p)
1079
{
1080
        struct vmod_directors_shard_param pstk;
1081
        const struct vmod_directors_shard_param *pp;
1082
1083 550
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1084 550
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1085 550
        return (default_healthy(pp->healthy));
1086
}
1087
1088
static const struct vmod_directors_shard_param *
1089 132
shard_param_blob(VCL_BLOB blob)
1090
{
1091
        const struct vmod_directors_shard_param *p;
1092
1093 220
        if (blob && blob->type == VMOD_SHARD_SHARD_PARAM_BLOB &&
1094 88
            blob->blob != NULL &&
1095 88
            blob->len == sizeof(struct vmod_directors_shard_param)) {
1096 88
                CAST_OBJ_NOTNULL(p, blob->blob, VMOD_SHARD_SHARD_PARAM_MAGIC);
1097 88
                return (p);
1098
        }
1099
1100 44
        return (NULL);
1101 132
}
1102
1103
/*
 * VCL method shard_param.use(): wrap the param struct in a BLOB of type
 * VMOD_SHARD_SHARD_PARAM_BLOB for later unwrapping by shard_param_blob().
 */
VCL_BLOB v_matchproto_(td_directors_shard_param_use)
vmod_shard_param_use(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

	return (VRT_blob(ctx, "xshard_param.use()", p, sizeof *p,
	    VMOD_SHARD_SHARD_PARAM_BLOB));
}