varnish-cache/vmod/vmod_directors_shard.c
0
/*-
1
 * Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
2
 * All rights reserved.
3
 *
4
 * Authors: Julian Wiesener <jw@uplex.de>
5
 *          Nils Goroll <slink@uplex.de>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 */
30
31
#include "config.h"
32
33
#include <stdlib.h>
34
#include <string.h>
35
36
#include "cache/cache.h"
37
#include "vcl.h"
38
39
#include "vend.h"
40
41
#include "vcc_directors_if.h"
42
#include "vmod_directors_shard_dir.h"
43
#include "vmod_directors_shard_cfg.h"
44
#include "vsb.h"
45
46
/* -------------------------------------------------------------------------
47
 *  shard director: LAZY mode (vdi resolve function), parameter objects
48
 *
49
 *  By associating a parameter object with a shard director, we enable LAZY
50
 *  lookups as with the other directors. Parameter objects are defined with VCL
51
 *  scope (normal vmod objects), but can be overridden per backend request using
52
 *  a task priv.
53
 *
54
 *  We use the same concept to carry shard.backend() parameters to vdi resolve
55
 *  for LAZY mode: They get saved in a per-director task scope parameter object.
56
 *
57
 *  Each object points to another object providing defaults for values which are
58
 *  not defined.
59
 *
60
 *  Actual resolution of the various parameter objects does not happen before
61
 *  they are used, which enables changing them independently (ie, shard
62
 *  .backend() parameters have precedence over an associated parameter object,
63
 *  which by itself can be overridden).
64
 *
65
 *  Overview of parameter objects (pointers are alternatives)
66
 *
67
 *  shard() director        shard_param() object    default param
68
 *
69
 *               --------------------------------->   vmod static
70
 *    VCL obj   /                                ->
71
 *    .param  -+--------->    VCL obj           /  _
72
 *                            .default  --------   /|
73
 *                                                /
74
 *                               ^               /
75
 *                               |              /
76
 *                                             /
77
 *                            .default        /
78
 *          ------------->    TASK priv      /
79
 *         /                                /
80
 *    .default -----------------------------
81
 *    TASK priv
82
 */
83
84
/* -------------------------------------------------------------------------
85
 * method arguments and set parameters bitmask in vmod_directors_shard_param
86
 */
87
88
#define arg_by          ((uint32_t)1)
89
#define arg_key         ((uint32_t)1 << 1)
90
#define arg_key_blob    ((uint32_t)1 << 2)
91
#define arg_alt         ((uint32_t)1 << 3)
92
#define arg_warmup      ((uint32_t)1 << 4)
93
#define arg_rampup      ((uint32_t)1 << 5)
94
#define arg_healthy     ((uint32_t)1 << 6)
95
#define arg_param       ((uint32_t)1 << 7)
96
#define arg_resolve     ((uint32_t)1 << 8)
97
#define arg_mask_       ((arg_resolve << 1) - 1)
98
/* allowed in shard_param.set */
99
#define arg_mask_set_   (arg_param - 1)
100
/* allowed in shard_param */
101
#define arg_mask_param_ ( arg_mask_set_         \
102
                          & ~arg_key                    \
103
                          & ~arg_key_blob )
104
105
/* -------------------------------------------------------------------------
106
 * shard parameters - declaration & defaults
107
 */
108
enum vmod_directors_shard_param_scope {
109
        _SCOPE_INVALID = 0,
110
        SCOPE_VMOD,
111
        SCOPE_VCL,
112
        SCOPE_TASK,
113
        SCOPE_STACK
114
};
115
116
struct vmod_directors_shard_param;
117
118
#define VMOD_SHARD_SHARD_PARAM_BLOB             0xdf5ca116
119
120
struct vmod_directors_shard_param {
121
        unsigned                                magic;
122
#define VMOD_SHARD_SHARD_PARAM_MAGIC            0xdf5ca117
123
124
        /* internals */
125
        uint32_t                                key;
126
        const char                              *vcl_name;
127
        const struct vmod_directors_shard_param *defaults;
128
        enum vmod_directors_shard_param_scope   scope;
129
130
        /* parameters */
131
        VCL_ENUM                                by;
132
        VCL_ENUM                                healthy;
133
        uint32_t                                mask;
134
        VCL_BOOL                                rampup;
135
        VCL_INT                                 alt;
136
        VCL_REAL                                warmup;
137
};
138
139
static const struct vmod_directors_shard_param shard_param_default = {
140
        .magic          = VMOD_SHARD_SHARD_PARAM_MAGIC,
141
142
        .key            = 0,
143
        .vcl_name       = "builtin defaults",
144
        .defaults       = NULL,
145
        .scope          = SCOPE_VMOD,
146
147
        .mask           = arg_mask_param_,
148
        .rampup = 1,
149
        .alt            = 0,
150
        .warmup         = -1,
151
};
152
153
#define default_by(ptr) (ptr == NULL ? VENUM(HASH) : ptr)
154
#define default_healthy(ptr) (ptr == NULL ? VENUM(CHOSEN) : ptr)
155
156
static struct vmod_directors_shard_param *
157
shard_param_stack(struct vmod_directors_shard_param *p,
158
    const struct vmod_directors_shard_param *pa, const char *who);
159
160
static const struct vmod_directors_shard_param *
161
shard_param_task_r(VRT_CTX, const void *id, const char *who,
162
    const struct vmod_directors_shard_param *pa);
163
164
static struct vmod_directors_shard_param *
165
shard_param_task_l(VRT_CTX, const void *id, const char *who,
166
    const struct vmod_directors_shard_param *pa);
167
168
static const struct vmod_directors_shard_param *
169
shard_param_blob(VCL_BLOB blob);
170
171
static const struct vmod_directors_shard_param *
172
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
173
    const struct vmod_directors_shard_param *p,
174
    struct vmod_directors_shard_param *pstk);
175
176
// XXX #3329 #3330 revisit - for now, treat pipe like backend
177
#define SHARD_VCL_TASK_REQ (VCL_MET_TASK_C & ~VCL_MET_PIPE)
178
#define SHARD_VCL_TASK_BEREQ (VCL_MET_TASK_B | VCL_MET_PIPE)
179
/* -------------------------------------------------------------------------
180
 * shard vmod interface
181
 */
182
static vdi_healthy_f vmod_shard_healthy;
183
static vdi_resolve_f vmod_shard_resolve;
184
static vdi_list_f vmod_shard_list;
185
186
struct vmod_directors_shard {
187
        unsigned                                magic;
188
#define VMOD_SHARD_SHARD_MAGIC                  0x6e63e1bf
189
        struct sharddir                         *shardd;
190
        VCL_BACKEND                             dir;
191
};
192
193
static void
194 1000
shard__assert(void)
195
{
196
        VCL_INT t1;
197
        uint32_t t2a, t2b;
198
199
        /* we put our uint32 key in a VCL_INT container */
200 1000
        assert(sizeof(VCL_INT) >= sizeof(uint32_t));
201 1000
        t2a = UINT32_MAX;
202 1000
        t1 = (VCL_INT)t2a;
203 1000
        t2b = (uint32_t)t1;
204 1000
        assert(t2a == t2b);
205 1000
}
206
207
static void v_matchproto_(vdi_destroy_f)
208 200
vmod_shard_destroy(VCL_BACKEND dir)
209
{
210
        struct sharddir *shardd;
211
212 200
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
213 200
        sharddir_delete(&shardd);
214 200
}
215
216
/* vdi method table registered with VRT_AddDirector() for every shard
 * director instance */
static const struct vdi_methods vmod_shard_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "shard",
        .resolve =      vmod_shard_resolve,
        .healthy =      vmod_shard_healthy,
        .destroy =      vmod_shard_destroy,
        .list =         vmod_shard_list
}};
224
225
226
VCL_VOID v_matchproto_(td_directors_shard__init)
227 1000
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
228
    const char *vcl_name)
229
{
230
        struct vmod_directors_shard *vshard;
231
232 1000
        shard__assert();
233
234 1000
        AN(vshardp);
235 1000
        AZ(*vshardp);
236 1000
        ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
237 1000
        AN(vshard);
238
239 1000
        *vshardp = vshard;
240 1000
        sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);
241
242 2000
        vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
243 1000
            "%s", vcl_name);
244 1000
}
245
246
VCL_VOID v_matchproto_(td_directors_shard__fini)
247 200
vmod_shard__fini(struct vmod_directors_shard **vshardp)
248
{
249
        struct vmod_directors_shard *vshard;
250
251 200
        TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
252 200
        VRT_DelDirector(&vshard->dir);
253 200
        FREE_OBJ(vshard);
254 200
}
255
256
VCL_INT v_matchproto_(td_directors_shard_key)
257 440
vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, VCL_STRANDS s)
258
{
259
260 440
        (void)ctx;
261 440
        (void)vshard;
262
263 440
        return ((VCL_INT)VRT_HashStrands32(s));
264
}
265
266
VCL_VOID v_matchproto_(td_directors_set_warmup)
267 160
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
268
    VCL_REAL probability)
269
{
270 160
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
271 160
        if (probability < 0 || probability >= 1) {
272 80
                shard_notice(ctx->vsl, vshard->shardd->name,
273
                    ".set_warmup(%f) ignored", probability);
274 80
                return;
275
        }
276 80
        shardcfg_set_warmup(vshard->shardd, probability);
277 160
}
278
279
VCL_VOID v_matchproto_(td_directors_set_rampup)
280 80
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
281
    VCL_DURATION duration)
282
{
283 80
        (void)ctx;
284 80
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
285 80
        shardcfg_set_rampup(vshard->shardd, duration);
286 80
}
287
288
VCL_VOID v_matchproto_(td_directors_shard_associate)
289 160
vmod_shard_associate(VRT_CTX,
290
    struct vmod_directors_shard *vshard, VCL_BLOB b)
291
{
292
        const struct vmod_directors_shard_param *ppt;
293 160
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
294
295 160
        if (b == NULL) {
296 40
                sharddir_set_param(vshard->shardd, &shard_param_default);
297 40
                return;
298
        }
299
300 120
        ppt = shard_param_blob(b);
301
302 120
        if (ppt == NULL) {
303 40
                shard_fail(ctx, vshard->shardd->name, "%s",
304
                    "shard .associate param invalid");
305 40
                return;
306
        }
307
308 80
        sharddir_set_param(vshard->shardd, ppt);
309 160
}
310
311
VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
312 4560
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
313
    struct VARGS(shard_add_backend) *args)
314
{
315 4560
        VCL_REAL weight = 1;
316
317 4560
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
318
319 4560
        if (args->backend == NULL) {
320 40
                shard_fail(ctx, vshard->shardd->name, "%s",
321
                    "None backend cannot be added");
322 40
                return (0);
323
        }
324
325 4520
        if (args->valid_weight) {
326 120
                if (args->weight >= 1)
327 80
                        weight = args->weight;
328
                else
329 40
                        shard_notice(ctx->vsl, vshard->shardd->name,
330
                            ".add_backend(weight=%f) ignored", args->weight);
331 120
        }
332
333 9040
        return (shardcfg_add_backend(ctx, vshard->shardd, args->backend,
334 4520
            args->valid_ident ? args->ident : NULL,
335 4520
            args->valid_rampup ? args->rampup : nan(""),
336 4520
            weight));
337 4560
}
338
339
VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
340 1200
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
341
    struct VARGS(shard_remove_backend) *args)
342
{
343 1200
        VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
344 1200
        VCL_STRING ident = args->valid_ident ? args->ident : NULL;
345
346 1200
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
347
348 1200
        if (be == NULL && ident == NULL) {
349 40
                shard_fail(ctx, vshard->shardd->name, "%s",
350
                    ".remove_backend(): either backend or ident are required");
351 40
                return (0);
352
        }
353
354 1160
        return (shardcfg_remove_backend(ctx, vshard->shardd, be, ident));
355 1200
}
356
357
/* .clear(): remove all backends from the shard directory. */
VCL_BOOL v_matchproto_(td_directors_shard_clear)
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        return (shardcfg_clear(ctx, vshard->shardd));
}
363
364
VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
365 1680
vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
366
    VCL_INT replicas)
367
{
368 1680
        return (shardcfg_reconfigure(ctx, vshard->shardd, replicas));
369
}
370
371
static inline uint32_t
372 5160
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
373
{
374
        struct http *http;
375 5160
        VCL_ENUM by = default_by(p->by);
376
377 5160
        if (by == VENUM(KEY) || by == VENUM(BLOB))
378 4080
                return (p->key);
379 1080
        if (by == VENUM(HASH) && ctx->bo != NULL) {
380 280
                CHECK_OBJ(ctx->bo, BUSYOBJ_MAGIC);
381 280
                return (vbe32dec(ctx->bo->digest));
382
        }
383 800
        if (by == VENUM(HASH) || by == VENUM(URL)) {
384 800
                if (ctx->http_req) {
385 560
                        AN(http = ctx->http_req);
386 560
                } else {
387 240
                        AN(ctx->http_bereq);
388 240
                        AN(http = ctx->http_bereq);
389
                }
390 800
                return (VRT_HashStrands32(TOSTRAND(http->hd[HTTP_HDR_URL].b)));
391
        }
392 0
        WRONG("by enum");
393 5160
}
394
395
/*
 * Merge parameters from "from" into "to", resolving all values still
 * undefined in "to" (tracked via the mask bits); recurses through the
 * chain of ->defaults until every arg_mask_param_ bit is set.
 * The key is to be calculated after merging.
 */
static void
shard_param_merge(struct vmod_directors_shard_param *to,
                  const struct vmod_directors_shard_param *from)
{
        CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((to->mask & ~arg_mask_param_) == 0);

        /* everything already resolved - nothing to merge */
        if (to->mask == arg_mask_param_)
                return;

        CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((from->mask & ~arg_mask_param_) == 0);

        /* the key only travels together with by=KEY / by=BLOB */
        if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
                to->by = from->by;
                if (from->by == VENUM(KEY) || from->by == VENUM(BLOB))
                        to->key = from->key;
        }

/* copy one field iff unset in "to" and set in "from" */
#define mrg(to, from, field) do {                                       \
                if (((to)->mask & arg_ ## field) == 0 &&                \
                    ((from)->mask & arg_ ## field) != 0)                \
                        (to)->field = (from)->field;                    \
        } while(0)

        mrg(to, from, healthy);
        mrg(to, from, rampup);
        mrg(to, from, alt);
        mrg(to, from, warmup);
#undef mrg

        to->mask |= from->mask;

        if (to->mask == arg_mask_param_)
                return;

        /* still unresolved bits: recurse into the next level of defaults */
        AN(from->defaults);
        shard_param_merge(to, from->defaults);
}
438
439
static uint32_t
440 400
shard_blob_key(VCL_BLOB key_blob)
441
{
442 400
        uint8_t k[4] = { 0 };
443
        const uint8_t *b;
444
        size_t i, ki;
445
446 400
        AN(key_blob);
447 400
        AN(key_blob->blob);
448 400
        assert(key_blob->len > 0);
449
450 400
        if (key_blob->len >= 4)
451 400
                ki = 0;
452
        else
453 0
                ki = 4 - key_blob->len;
454
455 400
        b = key_blob->blob;
456 2000
        for (i = 0; ki < 4; i++, ki++)
457 1600
                k[ki] = b[i];
458 400
        assert(i <= key_blob->len);
459
460 400
        return (vbe32dec(k));
461
}
462
463
/*
464
 * convert vmod interface valid_* to our bitmask
465
 */
466
467
#define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)
468
469
/* collect the valid_* flags of shard.backend() arguments into a bitmask */
static uint32_t
shard_backendarg_mask_(const struct VARGS(shard_backend) * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy)       |
                tobit(a, param)         |
                tobit(a, resolve));
}
482
/* collect the valid_* flags of shard_param.set() arguments into a bitmask
 * (no param/resolve bits - see arg_mask_set_) */
static uint32_t
shard_param_set_mask(const struct VARGS(shard_param_set) * const a)
{
        return (tobit(a, by)            |
                tobit(a, key)           |
                tobit(a, key_blob)      |
                tobit(a, alt)           |
                tobit(a, warmup)        |
                tobit(a, rampup)        |
                tobit(a, healthy));
}
493
#undef tobit
494
495
/*
 * Validate the method arguments selected by the "args" bitmask and store
 * them into the param struct p.
 *
 * Returns p on success, NULL after a shard_fail() on invalid arguments
 * (missing/out-of-range key, key with by=HASH/URL, negative alt,
 * warmup outside [0,1] and != -1).
 */
static struct vmod_directors_shard_param *
shard_param_args(VRT_CTX,
    struct vmod_directors_shard_param *p, const char *func,
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
{

        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p->vcl_name);

        assert((args & ~arg_mask_set_) == 0);

        /* absent by argument falls back to HASH via default_by() */
        if (!(args & arg_by))
                by_s = NULL;
        by_s = default_by(by_s);

        /* by_s / key_int / key_blob */
        if (by_s == VENUM(KEY)) {
                if ((args & arg_key) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                if (key_int < 0 || key_int > UINT32_MAX) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid key argument %jd with by=%s",
                            func, (intmax_t)key_int, by_s);
                        return (NULL);
                }
                assert(key_int >= 0);
                assert(key_int <= UINT32_MAX);
                p->key = (uint32_t)key_int;
        } else if (by_s == VENUM(BLOB)) {
                if ((args & arg_key_blob) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key_blob argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                /* an empty blob is only a notice-level error: key 0 is used */
                if (key_blob == NULL || key_blob->len == 0 ||
                    key_blob->blob == NULL) {
                        shard_err(ctx->vsl, p->vcl_name,
                            "%s by=BLOB but no or empty key_blob - using key 0",
                            func);
                        p->key = 0;
                } else
                        p->key = shard_blob_key(key_blob);
        } else if (by_s == VENUM(HASH) || by_s == VENUM(URL)) {
                /* an explicit key makes no sense when the key is derived */
                if (args & (arg_key|arg_key_blob)) {
                        shard_fail(ctx, p->vcl_name,
                            "%s key and key_blob arguments are "
                            "invalid with by=%s", func, by_s);
                        return (NULL);
                }
        } else {
                WRONG("by enum");
        }
        p->by = by_s;

        if (args & arg_alt) {
                if (alt < 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid alt argument %jd",
                            func, (intmax_t)alt);
                        return (NULL);
                }
                p->alt = alt;
        }

        if (args & arg_warmup) {
                /* -1 is the sentinel for "use the director default" */
                if ((warmup < 0 && warmup != -1) || warmup > 1) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid warmup argument %f",
                            func, warmup);
                        return (NULL);
                }
                p->warmup = warmup;
        }

        if (args & arg_rampup)
                p->rampup = !!rampup;

        if (args & arg_healthy)
                p->healthy = healthy_s;

        p->mask = args & arg_mask_param_;
        return (p);
}
587
588
/*
 * .backend(): either resolve a backend right away (resolve=NOW) or
 * return the director itself and defer resolution to vdi resolve
 * (resolve=LAZY), saving any extra arguments in a task-scoped param
 * object. In vcl_init/vcl_fini only LAZY is possible.
 */
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
                   struct VARGS(shard_backend) *a)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk;
        struct vmod_directors_shard_param *pp = NULL;
        const struct vmod_directors_shard_param *ppt;
        VCL_ENUM resolve;
        uint32_t args = shard_backendarg_mask_(a);

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardd = vshard->shardd;
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        assert((args & ~arg_mask_) == 0);

        /* default resolution: LAZY in vcl_init/vcl_fini, NOW otherwise */
        if (args & arg_resolve)
                resolve = a->resolve;
        else if (ctx->method & VCL_MET_TASK_H)
                resolve = VENUM(LAZY);
        else
                resolve = VENUM(NOW);

        if (resolve == VENUM(LAZY)) {
                /* no extra arguments: the director itself suffices */
                if ((args & ~arg_resolve) == 0) {
                        AN(vshard->dir);
                        return (vshard->dir);
                }

                if ((ctx->method & SHARD_VCL_TASK_BEREQ) == 0) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=LAZY) with other "
                            "parameters can only be used in backend/pipe "
                            "context");
                        return (NULL);
                }

                /* arguments are stored in a task-scoped param object */
                pp = shard_param_task_l(ctx, shardd, shardd->name,
                    shardd->param);
                if (pp == NULL)
                        return (NULL);
        } else if (resolve == VENUM(NOW)) {
                if (ctx->method & VCL_MET_TASK_H) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=NOW) can not be "
                            "used in vcl_init{}/vcl_fini{}");
                        return (NULL);
                }
                /* arguments only live on the stack for this call */
                ppt = shard_param_task_r(ctx, shardd, shardd->name,
                    shardd->param);
                AN(ppt);
                pp = shard_param_stack(&pstk, ppt, shardd->name);
        } else {
                WRONG("resolve enum");
        }

        AN(pp);

        /* an explicit param blob becomes the defaults for this call */
        if (args & arg_param) {
                ppt = shard_param_blob(a->param);
                if (ppt == NULL) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(key_blob) param invalid");
                        return (NULL);
                }
                pp->defaults = ppt;
        }

        pp = shard_param_args(ctx, pp, "shard.backend()",
                              args & arg_mask_set_,
                              a->by, a->key, a->key_blob, a->alt, a->warmup,
                              a->rampup, a->healthy);
        if (pp == NULL)
                return (NULL);

        if (resolve == VENUM(LAZY))
                return (vshard->dir);

        assert(resolve == VENUM(NOW));
        shard_param_merge(pp, pp->defaults);
        return (sharddir_pick_be(ctx, shardd, shard_get_key(ctx, pp),
            pp->alt, pp->warmup, pp->rampup, pp->healthy));
}
672
673
static VCL_BOOL v_matchproto_(vdi_healthy)
674 640
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
675
{
676
        struct sharddir *shardd;
677
678 640
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
679 640
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
680 640
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
681 640
        return (sharddir_any_healthy(ctx, shardd, changed));
682
}
683
684
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
685 600
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
686
{
687
        struct sharddir *shardd;
688
        struct vmod_directors_shard_param pstk[1];
689
        const struct vmod_directors_shard_param *pp;
690
691 600
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
692 600
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
693 600
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
694
695 1200
        pp = vmod_shard_param_read(ctx, shardd, shardd->name,
696 600
            shardd->param, pstk);
697 600
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
698
699 1200
        return (sharddir_pick_be(ctx, shardd,
700 600
                                 shard_get_key(ctx, pp), pp->alt, pp->warmup,
701 600
                                 pp->rampup, pp->healthy));
702
}
703
704
/*
 * vdi list callback: emit health/rampup state for the CLI and for
 * VCL tracing, either as JSON (jflag) or as a plain table, with
 * per-backend detail only when pflag is set.
 */
static void v_matchproto_(vdi_list_f)
vmod_shard_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag, int jflag)
{
        struct sharddir *shardd;
        struct shard_backend *sbe;
        VCL_TIME c, changed = 0;
        VCL_DURATION rampup_d, d;
        VCL_BACKEND be;
        VCL_BOOL h;
        unsigned i, nh = 0;
        double rampup_p;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        /* header of the detail output */
        if (pflag) {
                if (jflag) {
                        VSB_cat(vsb, "{\n");
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"warmup\": %f,\n", shardd->warmup);
                        VSB_printf(vsb, "\"rampup_duration\": %f,\n",
                            shardd->rampup_duration);
                        VSB_cat(vsb, "\"backends\": {\n");
                        VSB_indent(vsb, 2);
                } else {
                        VSB_cat(vsb, "\n\n\tBackend\tIdent\tHealth\t"
                            "Rampup  Remaining\n");
                }
        }

        sharddir_rdlock(shardd);
        for (i = 0; i < shardd->n_backend; i++) {
                sbe = &shardd->backend[i];
                AN(sbe);
                be = sbe->backend;
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

                c = 0;
                h = VRT_Healthy(ctx, be, &c);
                if (h)
                        nh++;
                if (c > changed)
                        changed = c;
                /* without pflag only the health summary is collected */
                if ((pflag) == 0)
                        continue;

                /* rampup progress: fraction done and remaining duration */
                d = ctx->now - c;
                rampup_d = shardcfg_get_rampup(shardd, i);
                if (! h) {
                        rampup_p = 0.0;
                        rampup_d = 0.0;
                } else if (d < rampup_d) {
                        rampup_p = d / rampup_d;
                        rampup_d -= d;
                } else {
                        rampup_p = 1.0;
                        rampup_d = 0.0;
                }

                if (jflag) {
                        if (i)
                                VSB_cat(vsb, ",\n");
                        VSB_printf(vsb, "\"%s\": {\n",
                            be->vcl_name);
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"ident\": \"%s\",\n",
                            sbe->ident ? sbe->ident : be->vcl_name);
                        VSB_printf(vsb, "\"health\": \"%s\",\n",
                            h ? "healthy" : "sick");
                        VSB_printf(vsb, "\"rampup\": %f,\n", rampup_p);
                        VSB_printf(vsb, "\"rampup_remaining\": %.3f\n",
                            rampup_d);
                        VSB_indent(vsb, -2);
                        VSB_cat(vsb, "}");
                } else {
                        VSB_printf(vsb, "\t%s\t%s\t%s\t%6.2f%% %8.3fs\n",
                            be->vcl_name,
                            sbe->ident ? sbe->ident : be->vcl_name,
                            h ? "healthy" : "sick",
                            rampup_p * 100, rampup_d);
                }
        }
        sharddir_unlock(shardd);

        /* close the JSON objects opened in the pflag header */
        if (jflag && (pflag)) {
                VSB_cat(vsb, "\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "}\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "},\n");
        }

        if (pflag)
                return;

        /* summary line: healthy count out of total */
        if (jflag)
                VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, i,
                    nh ? "healthy" : "sick");
        else
                VSB_printf(vsb, "%u/%u\t%s", nh, i, nh ? "healthy" : "sick");
}
806
807
VCL_VOID v_matchproto_(td_directors_shard_backend)
808 280
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
809
    VCL_INT i)
810
{
811 280
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
812
813 280
        (void)ctx;
814 280
        sharddir_debug(vshard->shardd, i & UINT32_MAX);
815 280
}
816
817
/* =============================================================
818
 * shard_param
819
 */
820
821
VCL_VOID v_matchproto_(td_directors_shard_param__init)
822 680
vmod_shard_param__init(VRT_CTX,
823
    struct vmod_directors_shard_param **pp, const char *vcl_name)
824
{
825
        struct vmod_directors_shard_param *p;
826
827 680
        (void) ctx;
828 680
        AN(pp);
829 680
        AZ(*pp);
830 680
        ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
831 680
        AN(p);
832 680
        p->vcl_name = vcl_name;
833 680
        p->scope = SCOPE_VCL;
834 680
        p->defaults = &shard_param_default;
835
836 680
        *pp = p;
837 680
}
838
839
VCL_VOID v_matchproto_(td_directors_shard_param__fini)
840 320
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
841
{
842
        struct vmod_directors_shard_param *p;
843
844 320
        TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
845 320
        FREE_OBJ(p);
846 320
}
847
848
/*
 * Initialize a caller-provided stack param struct named "who",
 * defaulting to pa, and return it. The struct starts with an empty
 * mask, so all values resolve through pa.
 */
static struct vmod_directors_shard_param *
shard_param_stack(struct vmod_directors_shard_param *p,
    const struct vmod_directors_shard_param *pa, const char *who)
{
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        AN(p);
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        p->vcl_name = who;
        p->scope = SCOPE_STACK;
        p->defaults = pa;

        return (p);
}
866
867
static const struct vmod_directors_shard_param *
868 10360
shard_param_task_r(VRT_CTX, const void *id, const char *who,
869
   const struct vmod_directors_shard_param *pa)
870
{
871
        const struct vmod_directors_shard_param *p;
872
        const struct vmod_priv *task;
873
        const void *task_id;
874
875 10360
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
876 10360
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
877 10360
        assert(pa->scope > _SCOPE_INVALID);
878
879 10360
        task_id = (const char *)id + task_off_param;
880 10360
        task = VRT_priv_task_get(ctx, task_id);
881
882 10360
        if (task) {
883 5440
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
884 5440
                assert(p->scope == SCOPE_TASK);
885 5440
                assert(who == p->vcl_name);
886 5440
                return (p);
887
        }
888
889 4920
        if (id == pa || pa->scope != SCOPE_VCL)
890 3560
                return (pa);
891
892 1360
        return (shard_param_task_r(ctx, pa, pa->vcl_name, pa));
893 10360
}
894
895
/*
 * get a task scoped param struct for id defaulting to pa
 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
 *
 * Returns NULL when no priv_task is available (after calling
 * shard_fail()) or when the task workspace allocation fails.
 */
static struct vmod_directors_shard_param *
shard_param_task_l(VRT_CTX, const void *id, const char *who,
   const struct vmod_directors_shard_param *pa)
{
	struct vmod_directors_shard_param *p;
	struct vmod_priv *task;
	const void *task_id;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert(pa->scope > _SCOPE_INVALID);

	/* key the task priv by id offset by task_off_param, matching the
	 * lookup in shard_param_task_r() */
	task_id = (const char *)id + task_off_param;
	task = VRT_priv_task(ctx, task_id);

	if (task == NULL) {
		shard_fail(ctx, who, "%s", "no priv_task");
		return (NULL);
	}

	/* already created earlier in this task: reuse it */
	if (task->priv) {
		CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
		assert(p->scope == SCOPE_TASK);
		assert(who == p->vcl_name);
		return (p);
	}

	WS_TASK_ALLOC_OBJ(ctx, p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	if (p == NULL)
		return (NULL);
	/* register in the task priv BEFORE filling defaults, so the
	 * recursive call below cannot create this object a second time */
	task->priv = p;
	p->vcl_name = who;
	p->scope = SCOPE_TASK;

	/* for a distinct VCL-scope pa, route defaults through pa's own
	 * task-scope param; the recursion terminates because it is called
	 * with id == pa */
	if (id == pa || pa->scope != SCOPE_VCL)
		p->defaults = pa;
	else
		p->defaults = shard_param_task_l(ctx, pa, pa->vcl_name, pa);

	if (p->defaults == NULL)
		return (NULL);

	return (p);
}
943
944
static struct vmod_directors_shard_param *
945 4160
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
946
    const char *who)
947
{
948 4160
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
949 4160
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
950
951 4160
        if (ctx->method & SHARD_VCL_TASK_REQ) {
952 40
                shard_fail(ctx, p->vcl_name, "%s may only be used "
953
                    "in vcl_init and in backend/pipe context", who);
954 40
                return (NULL);
955 4120
        } else if (ctx->method & SHARD_VCL_TASK_BEREQ)
956 3520
                p = shard_param_task_l(ctx, p, p->vcl_name, p);
957
        else
958 600
                assert(ctx->method & VCL_MET_TASK_H);
959
960 4120
        return (p);
961 4160
}
962
963
VCL_VOID v_matchproto_(td_directors_shard_param_set)
964 3960
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
965
                     struct VARGS(shard_param_set) *a)
966
{
967 3960
        uint32_t args = shard_param_set_mask(a);
968
969 3960
        assert((args & ~arg_mask_set_) == 0);
970
971 3960
        p = shard_param_prep(ctx, p, "shard_param.set()");
972 3960
        if (p == NULL)
973 40
                return;
974 7840
        (void) shard_param_args(ctx, p, "shard_param.set()", args,
975 3920
                                a->by, a->key, a->key_blob, a->alt, a->warmup,
976 3920
                                a->rampup, a->healthy);
977 3960
}
978
979
VCL_VOID v_matchproto_(td_directors_shard_param_clear)
980 200
vmod_shard_param_clear(VRT_CTX,
981
    struct vmod_directors_shard_param *p)
982
{
983 200
        p = shard_param_prep(ctx, p, "shard_param.clear()");
984 200
        if (p == NULL)
985 0
                return;
986 200
        p->mask = 0;
987 200
}
988
989
static const struct vmod_directors_shard_param *
990 6600
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
991
    const struct vmod_directors_shard_param *p,
992
    struct vmod_directors_shard_param *pstk)
993
{
994
        struct vmod_directors_shard_param *pp;
995
996 6600
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
997 6600
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
998
999 6600
        if (ctx->method == 0 || (ctx->method & SHARD_VCL_TASK_BEREQ))
1000 5400
                p = shard_param_task_r(ctx, id, who, p);
1001
1002 6600
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
1003 6600
        pp = shard_param_stack(pstk, p, p->vcl_name);
1004 6600
        shard_param_merge(pp, p);
1005 6600
        return (pp);
1006
}
1007
1008
VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
1009 1000
vmod_shard_param_get_by(VRT_CTX,
1010
    struct vmod_directors_shard_param *p)
1011
{
1012
        struct vmod_directors_shard_param pstk;
1013
        const struct vmod_directors_shard_param *pp;
1014
1015 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1016 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1017 1000
        return (default_by(pp->by));
1018
}
1019
1020
VCL_INT v_matchproto_(td_directors_shard_param_get_key)
1021 1000
vmod_shard_param_get_key(VRT_CTX,
1022
    struct vmod_directors_shard_param *p)
1023
{
1024
        struct vmod_directors_shard_param pstk;
1025
        const struct vmod_directors_shard_param *pp;
1026
1027 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1028 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1029 1000
        return ((VCL_INT)shard_get_key(ctx, pp));
1030
}
1031
VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
1032 1000
vmod_shard_param_get_alt(VRT_CTX,
1033
    struct vmod_directors_shard_param *p)
1034
{
1035
        struct vmod_directors_shard_param pstk;
1036
        const struct vmod_directors_shard_param *pp;
1037
1038 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1039 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1040 1000
        return (pp->alt);
1041
}
1042
1043
VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
1044 1000
vmod_shard_param_get_warmup(VRT_CTX,
1045
    struct vmod_directors_shard_param *p)
1046
{
1047
        struct vmod_directors_shard_param pstk;
1048
        const struct vmod_directors_shard_param *pp;
1049
1050 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1051 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1052 1000
        return (pp->warmup);
1053
}
1054
1055
VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
1056 1000
vmod_shard_param_get_rampup(VRT_CTX,
1057
    struct vmod_directors_shard_param *p)
1058
{
1059
        struct vmod_directors_shard_param pstk;
1060
        const struct vmod_directors_shard_param *pp;
1061
1062 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1063 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1064 1000
        return (pp->rampup);
1065
}
1066
1067
VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
1068 1000
vmod_shard_param_get_healthy(VRT_CTX,
1069
    struct vmod_directors_shard_param *p)
1070
{
1071
        struct vmod_directors_shard_param pstk;
1072
        const struct vmod_directors_shard_param *pp;
1073
1074 1000
        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1075 1000
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1076 1000
        return (default_healthy(pp->healthy));
1077
}
1078
1079
static const struct vmod_directors_shard_param *
1080 240
shard_param_blob(VCL_BLOB blob)
1081
{
1082
        const struct vmod_directors_shard_param *p;
1083
1084 400
        if (blob && blob->type == VMOD_SHARD_SHARD_PARAM_BLOB &&
1085 160
            blob->blob != NULL &&
1086 160
            blob->len == sizeof(struct vmod_directors_shard_param)) {
1087 160
                CAST_OBJ_NOTNULL(p, blob->blob, VMOD_SHARD_SHARD_PARAM_MAGIC);
1088 160
                return (p);
1089
        }
1090
1091 80
        return (NULL);
1092 240
}
1093
1094
/*
 * .use() method: wrap the param struct in a BLOB (via VRT_blob(), tagged
 * VMOD_SHARD_SHARD_PARAM_BLOB with len sizeof *p, which is exactly what
 * shard_param_blob() accepts when unwrapping it again).
 */
VCL_BLOB v_matchproto_(td_directors_shard_param_use)
vmod_shard_param_use(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

	return (VRT_blob(ctx, "xshard_param.use()", p, sizeof *p,
	    VMOD_SHARD_SHARD_PARAM_BLOB));
}