varnish-cache/vmod/vmod_directors_shard_cfg.c
1
/*-
2
 * Copyright 2009-2016 UPLEX - Nils Goroll Systemoptimierung
3
 * All rights reserved.
4
 *
5
 * Authors: Nils Goroll <nils.goroll@uplex.de>
6
 *          Geoffrey Simmons <geoff@uplex.de>
7
 *
8
 * SPDX-License-Identifier: BSD-2-Clause
9
 *
10
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
12
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 */
31
32
#include "config.h"
33
34
#include <limits.h>
35
#include <stdlib.h>
36
#include <stdio.h>
37
#include <string.h>
38
39
#include "cache/cache.h"
40
41
#include "vmod_directors_shard_dir.h"
42
#include "vmod_directors_shard_cfg.h"
43
44
/*lint -esym(749,  shard_change_task_e::*) */
/* Kinds of pending reconfiguration tasks queued on a shard_change. */
enum shard_change_task_e {
	_SHARD_TASK_E_INVALID = 0,	/* guard: zeroed memory is never valid */
	CLEAR,		/* drop all backends (priv is NULL) */
	ADD_BE,		/* add one backend (priv is a struct shard_backend) */
	REMOVE_BE,	/* remove matching backend(s) (priv likewise) */
	_SHARD_TASK_E_MAX
};
52
53
/*
 * One queued configuration change; allocated on the VCL (task)
 * workspace and linked into shard_change.tasks.
 */
struct shard_change_task {
	unsigned				magic;
#define SHARD_CHANGE_TASK_MAGIC			0x1e1168af
	enum shard_change_task_e		task;
	void					*priv;	/* task argument, see enum above */
	VCL_REAL				weight;	/* replica weight, ADD_BE only */
	VSTAILQ_ENTRY(shard_change_task)	list;
};
61
62
/*
 * Per-task (PRIV_TASK) list of pending changes for one shard director,
 * allocated on the VCL workspace by shard_change_get() and applied by
 * change_reconfigure() (explicitly or from the priv fini callback).
 */
struct shard_change {
	unsigned				magic;
#define SHARD_CHANGE_MAGIC			0xdff5c9a6
	struct vsl_log				*vsl;	/* vsl of the creating ctx */
	struct sharddir				*shardd; /* director being changed */
	VSTAILQ_HEAD(,shard_change_task)	tasks;
};
69
70
/*
 * Bookkeeping for one reconfiguration pass over the backend array.
 * A "hole" is a slot whose .backend is NULL after a removal; holes are
 * reused by adds and compacted away in shardcfg_backend_finalize().
 */
struct backend_reconfig {
	struct sharddir * const shardd;
	unsigned		hint;	// on number of backends after reconfig
	unsigned		hole_n; // number of holes in backends array
	unsigned		hole_i; // index hint on first hole
};
76
77
/* forward decl */
78
static VCL_BOOL
79
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas);
80
81
/*
82
 * ============================================================
83
 * change / task list
84
 *
85
 * for backend reconfiguration, we create a change list on the VCL workspace in
86
 * a PRIV_TASK state, which we work in reconfigure.
87
 */
88
89
static void v_matchproto_(vmod_priv_fini_f)
90 190
shard_change_fini(VRT_CTX, void * priv)
91
{
92
        struct shard_change *change;
93
94 190
        if (priv == NULL)
95 0
                return;
96
97 190
        CAST_OBJ_NOTNULL(change, priv, SHARD_CHANGE_MAGIC);
98
99 190
        (void) change_reconfigure(ctx, change, 67);
100 190
}
101
102
/* priv_task callbacks: make sure pending changes are applied at task end. */
static const struct vmod_priv_methods shard_change_priv_methods[1] = {{
	.magic = VMOD_PRIV_METHODS_MAGIC,
	.type = "vmod_directors_shard_cfg",
	.fini = shard_change_fini
}};
107
108
/*
 * Return the per-task change list for shardd, creating it on the VCL
 * workspace on first use.
 *
 * Returns NULL (after logging a shard failure) when no priv_task is
 * available or the workspace is exhausted.
 */
static struct shard_change *
shard_change_get(VRT_CTX, struct sharddir * const shardd)
{
	struct vmod_priv *task;
	struct shard_change *change;
	/* unique priv_task id: the director address offset by task_off_cfg */
	const void *id = (const char *)shardd + task_off_cfg;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	task = VRT_priv_task(ctx, id);
	if (task == NULL) {
		shard_fail(ctx, shardd->name, "%s", "no priv_task");
		return (NULL);
	}

	/* already created earlier in this task? */
	if (task->priv != NULL) {
		CAST_OBJ_NOTNULL(change, task->priv, SHARD_CHANGE_MAGIC);
		assert (change->vsl == ctx->vsl);
		assert (change->shardd == shardd);
		return (change);
	}

	change = WS_Alloc(ctx->ws, sizeof(*change));
	if (change == NULL) {
		shard_fail(ctx, shardd->name, "%s", "could not get workspace");
		return (NULL);
	}

	INIT_OBJ(change, SHARD_CHANGE_MAGIC);
	change->vsl = ctx->vsl;
	change->shardd = shardd;
	VSTAILQ_INIT(&change->tasks);
	task->priv = change;
	task->methods = shard_change_priv_methods;

	return (change);
}
145
146
/*
 * Reset the task list once the queued changes have been applied, so a
 * subsequent reconfigure in the same task starts from scratch.
 */
static void
shard_change_finish(struct shard_change *change)
{
	CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);

	VSTAILQ_INIT(&change->tasks);
}
153
154
static struct shard_change_task *
155 1570
shard_change_task_add(VRT_CTX, struct shard_change *change,
156
    enum shard_change_task_e task_e, void *priv)
157
{
158
        struct shard_change_task *task;
159
160 1570
        CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
161
162 1570
        task = WS_Alloc(ctx->ws, sizeof(*task));
163 1570
        if (task == NULL) {
164 0
                shard_fail(ctx, change->shardd->name, "%s",
165
                    "could not get workspace for task");
166 0
                return (NULL);
167
        }
168 1570
        INIT_OBJ(task, SHARD_CHANGE_TASK_MAGIC);
169 1570
        task->task = task_e;
170 1570
        task->priv = priv;
171 1570
        VSTAILQ_INSERT_TAIL(&change->tasks, task, list);
172
173 1570
        return (task);
174 1570
}
175
176
static inline struct shard_change_task *
177 1420
shard_change_task_backend(VRT_CTX, struct sharddir *shardd,
178
    enum shard_change_task_e task_e, VCL_BACKEND be, VCL_STRING ident,
179
    VCL_DURATION rampup)
180
{
181
        struct shard_change *change;
182
        struct shard_backend *b;
183
184 1420
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
185 1420
        assert(task_e == ADD_BE || task_e == REMOVE_BE);
186
187 1420
        change = shard_change_get(ctx, shardd);
188 1420
        if (change == NULL)
189 0
                return (NULL);
190
191 1420
        b = WS_Alloc(ctx->ws, sizeof(*b));
192 1420
        if (b == NULL) {
193 0
                shard_fail(ctx, change->shardd->name, "%s",
194
                    "could not get workspace for task");
195 0
                return (NULL);
196
        }
197
198 1420
        b->backend = NULL;
199 1420
        VRT_Assign_Backend(&b->backend, be);
200 1420
        b->ident = ident != NULL && *ident != '\0' ? ident : NULL;
201 1420
        b->rampup = rampup;
202
203 1420
        return (shard_change_task_add(ctx, change, task_e, b));
204 1420
}
205
206
/*
207
 * ============================================================
208
 * director reconfiguration tasks
209
 */
210
VCL_BOOL
211 1130
shardcfg_add_backend(VRT_CTX, struct sharddir *shardd,
212
    VCL_BACKEND be, VCL_STRING ident, VCL_DURATION rampup, VCL_REAL weight)
213
{
214
        struct shard_change_task *task;
215
216 1130
        assert (weight >= 1);
217 1130
        AN(be);
218
219 2260
        task = shard_change_task_backend(ctx, shardd, ADD_BE,
220 1130
            be, ident, rampup);
221
222 1130
        if (task == NULL)
223 0
                return (0);
224
225 1130
        task->weight = weight;
226 1130
        return (1);
227 1130
}
228
229
VCL_BOOL
230 290
shardcfg_remove_backend(VRT_CTX, struct sharddir *shardd,
231
    VCL_BACKEND be, VCL_STRING ident)
232
{
233 870
        return (shard_change_task_backend(ctx, shardd, REMOVE_BE,
234 580
            be, ident, 0) != NULL);
235
}
236
237
VCL_BOOL
238 150
shardcfg_clear(VRT_CTX, struct sharddir *shardd)
239
{
240
        struct shard_change *change;
241
242 150
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
243
244 150
        change = shard_change_get(ctx, shardd);
245 150
        if (change == NULL)
246 0
                return (0);
247
248 150
        return (shard_change_task_add(ctx, change, CLEAR, NULL) != NULL);
249 150
}
250
251
/*
252
 * ============================================================
253
 * consistent hashing cirle init
254
 */
255
256
typedef int (*compar)( const void*, const void* );
257
258
static int
259 104230
circlepoint_compare(const struct shard_circlepoint *a,
260
    const struct shard_circlepoint *b)
261
{
262 104230
        return ((a->point == b->point) ? 0 : ((a->point > b->point) ? 1 : -1));
263
}
264
265
static void
266 360
shardcfg_hashcircle(struct sharddir *shardd)
267
{
268
        const struct shard_backend *backends, *b;
269
        unsigned h;
270
        uint32_t i, j, n_points, r, rmax;
271
        const char *ident;
272 360
        const int len = 12; // log10(UINT32_MAX) + 2;
273
        char s[len];
274
275 360
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
276 360
        AZ(shardd->hashcircle);
277
278 360
        assert(shardd->n_backend > 0);
279 360
        backends=shardd->backend;
280 360
        AN(backends);
281
282 360
        n_points = 0;
283 360
        rmax = (UINT32_MAX - 1) / shardd->n_backend;
284 1870
        for (b = backends; b < backends + shardd->n_backend; b++) {
285 1510
                CHECK_OBJ_NOTNULL(b->backend, DIRECTOR_MAGIC);
286 1510
                n_points += vmin_t(uint32_t, b->replicas, rmax);
287 1510
        }
288
289 360
        assert(n_points < UINT32_MAX);
290
291 360
        shardd->n_points = n_points;
292 360
        shardd->hashcircle = calloc(n_points, sizeof(struct shard_circlepoint));
293 360
        AN(shardd->hashcircle);
294
295 360
        i = 0;
296 1870
        for (h = 0, b = backends; h < shardd->n_backend; h++, b++) {
297 1510
                ident = b->ident ? b->ident : VRT_BACKEND_string(b->backend);
298
299 1510
                AN(ident);
300 1510
                assert(ident[0] != '\0');
301
302 1510
                r = vmin_t(uint32_t, b->replicas, rmax);
303
304 18350
                for (j = 0; j < r; j++) {
305 16840
                        assert(snprintf(s, len, "%d", j) < len);
306 16840
                        assert (i < n_points);
307 16840
                        shardd->hashcircle[i].point =
308 16840
                            VRT_HashStrands32(TOSTRANDS(2, ident, s));
309 16840
                        shardd->hashcircle[i].host = h;
310 16840
                        i++;
311 16840
                }
312 1510
        }
313 360
        assert (i == n_points);
314 360
        qsort( (void *) shardd->hashcircle, n_points,
315
            sizeof (struct shard_circlepoint), (compar) circlepoint_compare);
316
317 360
        if ((shardd->debug_flags & SHDBG_CIRCLE) == 0)
318 130
                return;
319
320 6290
        for (i = 0; i < n_points; i++)
321 6060
                SHDBG(SHDBG_CIRCLE, shardd,
322
                    "hashcircle[%5jd] = {point = %8x, host = %2u}\n",
323
                    (intmax_t)i, shardd->hashcircle[i].point,
324
                    shardd->hashcircle[i].host);
325 360
}
326
327
/*
328
 * ============================================================
329
 * configure the director backends
330
 */
331
332
static void
333 460
shardcfg_backend_free(struct shard_backend *f)
334
{
335 460
        if (f->freeptr)
336 350
                free (f->freeptr);
337 460
        VRT_Assign_Backend(&f->backend, NULL);
338 460
        memset(f, 0, sizeof(*f));
339 460
}
340
341
static void
342 970
shardcfg_backend_copyin(struct shard_backend *dst,
343
    const struct shard_backend *src)
344
{
345 970
        dst->backend = src->backend;
346 970
        dst->ident = src->ident ? strdup(src->ident) : NULL;
347 970
        dst->rampup = src->rampup;
348 970
}
349
350
/*
 * Compare two backend specs for identity.  Returns 0 when they denote
 * the same backend, non-zero otherwise (strcmp order when idents are
 * involved).  A missing ident falls back to the backend's vcl_name.
 */
static int
shardcfg_backend_cmp(const struct shard_backend *a,
    const struct shard_backend *b)
{
	const char *ai, *bi;

	ai = a->ident;
	bi = b->ident;

	assert(ai || a->backend);
	assert(bi || b->backend);

	/* vcl_names are unique, so we can compare the backend pointers */
	if (ai == NULL && bi == NULL)
		return (a->backend != b->backend);

	if (ai == NULL)
		ai = VRT_BACKEND_string(a->backend);

	if (bi == NULL)
		bi = VRT_BACKEND_string(b->backend);

	AN(ai);
	AN(bi);
	return (strcmp(ai, bi));
}
376
377
/* for removal, we delete all instances if the backend matches */
378
static int
379 3250
shardcfg_backend_del_cmp(const struct shard_backend *task,
380
    const struct shard_backend *b)
381
{
382 3250
        assert(task->backend || task->ident);
383
384 3250
        if (task->ident == NULL)
385 90
                return (task->backend != b->backend);
386
387 3160
        return (shardcfg_backend_cmp(task, b));
388 3250
}
389
390
static const struct shard_backend *
391 1050
shardcfg_backend_lookup(const struct backend_reconfig *re,
392
    const struct shard_backend *b)
393
{
394 1050
        unsigned i, max = re->shardd->n_backend + re->hole_n;
395 1050
        const struct shard_backend *bb = re->shardd->backend;
396
397 1050
        if (max > 0)
398 800
                AN(bb);
399
400 5010
        for (i = 0; i < max; i++) {
401 4040
                if (bb[i].backend == NULL)
402 60
                        continue;       // hole
403 3980
                if (!shardcfg_backend_cmp(b, &bb[i]))
404 80
                        return (&bb[i]);
405 3900
        }
406 970
        return (NULL);
407 1050
}
408
409
/*
 * Grow the backend array: to at least max(hint, 16) slots, or double
 * it when it already holds that many.  Allocation failure aborts via
 * AN(), so the realloc-into-the-same-pointer pattern cannot leak here.
 */
static void
shardcfg_backend_expand(const struct backend_reconfig *re)
{
	unsigned min = re->hint;

	CHECK_OBJ_NOTNULL(re->shardd, SHARDDIR_MAGIC);

	min = vmax_t(unsigned, min, 16);

	if (re->shardd->l_backend < min)
		re->shardd->l_backend = min;
	else
		re->shardd->l_backend *= 2;

	re->shardd->backend = realloc(re->shardd->backend,
	    re->shardd->l_backend * sizeof *re->shardd->backend);

	AN(re->shardd->backend);
}
428
429
/*
 * Store backend spec b with the given (already weight-scaled) replica
 * count: either in the first hole left by an earlier delete, or
 * appended at the end, growing the array when full.
 */
static void
shardcfg_backend_add(struct backend_reconfig *re,
    const struct shard_backend *b, uint32_t replicas)
{
	unsigned i;
	struct shard_backend *bb = re->shardd->backend;

	if (re->hole_n == 0) {
		/* no holes: append */
		if (re->shardd->n_backend >= re->shardd->l_backend) {
			shardcfg_backend_expand(re);
			bb = re->shardd->backend;	/* may have moved */
		}
		assert(re->shardd->n_backend < re->shardd->l_backend);
		i = re->shardd->n_backend;
	} else {
		/* advance hole_i to the next actual hole and take it */
		assert(re->hole_i != UINT_MAX);
		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) < re->shardd->n_backend + re->hole_n);
		assert(re->hole_i < re->shardd->n_backend + re->hole_n);

		i = (re->hole_i)++;
		(re->hole_n)--;
	}

	re->shardd->n_backend++;
	shardcfg_backend_copyin(&bb[i], b);
	bb[i].replicas = replicas;
}
459
460
static void
461 130
shardcfg_backend_clear(struct sharddir *shardd)
462
{
463
        unsigned i;
464 280
        for (i = 0; i < shardd->n_backend; i++)
465 150
                shardcfg_backend_free(&shardd->backend[i]);
466 130
        shardd->n_backend = 0;
467 130
}
468
469
470
/*
 * Delete every backend matching spec (all instances when matching by
 * director pointer, see shardcfg_backend_del_cmp), leaving holes to be
 * compacted later by shardcfg_backend_finalize().  Finally drops the
 * task's own director reference.
 */
static void
shardcfg_backend_del(struct backend_reconfig *re, struct shard_backend *spec)
{
	unsigned i, max = re->shardd->n_backend + re->hole_n;
	struct shard_backend * const bb = re->shardd->backend;

	for (i = 0; i < max; i++) {
		if (bb[i].backend == NULL)
			continue;	// hole
		if (shardcfg_backend_del_cmp(spec, &bb[i]))
			continue;

		shardcfg_backend_free(&bb[i]);
		re->shardd->n_backend--;
		/* freeing the last used slot does not create a hole */
		if (i < re->shardd->n_backend + re->hole_n) {
			(re->hole_n)++;
			re->hole_i = vmin(re->hole_i, i);
		}
	}
	VRT_Assign_Backend(&spec->backend, NULL);
}
491
492
/*
 * Compact the backend array after deletions: repeatedly trim trailing
 * holes, then move the last used slot into the first remaining hole,
 * until no holes are left.
 */
static void
shardcfg_backend_finalize(struct backend_reconfig *re)
{
	unsigned i;
	struct shard_backend * const bb = re->shardd->backend;

	while (re->hole_n > 0) {
		// trim end
		i = re->shardd->n_backend + re->hole_n - 1;
		while (re->hole_n && bb[i].backend == NULL) {
			(re->hole_n)--;
			i--;
		}

		if (re->hole_n == 0)
			break;

		/* any remaining hole must be before the last used slot */
		assert(re->hole_i < i);

		/* advance hole_i to the first actual hole */
		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) <= i);

		assert(re->hole_i < i);
		assert(bb[re->hole_i].backend == NULL);
		assert(bb[i].backend != NULL);

		/* fill the hole with the last used slot, empty the tail */
		memcpy(&bb[re->hole_i], &bb[i], sizeof(*bb));
		memset(&bb[i], 0, sizeof(*bb));

		(re->hole_n)--;
		(re->hole_i)++;
	}

	assert(re->hole_n == 0);
}
529
530
/*
531
 * ============================================================
532
 * work the change tasks
533
 */
534
535
/*
 * Apply a queued change list to the director.
 *
 * Pass 1 computes a size hint and finds the last CLEAR task (everything
 * queued before it is moot).  If a CLEAR was seen, all backends are
 * dropped; then, starting with the task after that CLEAR (or from the
 * head when there was none, courtesy of VSTAILQ_FOREACH_FROM on NULL),
 * ADD_BE and REMOVE_BE tasks are applied in order and the backend array
 * is compacted.
 *
 * Duplicate adds are skipped with a notice.  NOTE(review): on the skip
 * path the director reference held by the task's priv appears not to be
 * released here -- verify against VRT_Assign_Backend refcounting; same
 * question for ADD_BE tasks discarded before the last CLEAR.
 */
static void
shardcfg_apply_change(struct vsl_log *vsl, struct sharddir *shardd,
    const struct shard_change *change, VCL_INT replicas)
{
	struct shard_change_task *task, *clear;
	const struct shard_backend *b;
	uint32_t b_replicas;

	struct backend_reconfig re = {
		.shardd = shardd,
		.hint = shardd->n_backend,
		.hole_n = 0,
		.hole_i = UINT_MAX
	};

	// XXX assert sharddir_locked(shardd)

	/* pass 1: find the last CLEAR, predict the final backend count */
	clear = NULL;
	VSTAILQ_FOREACH(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			clear = task;
			re.hint = 0;
			break;
		case ADD_BE:
			re.hint++;
			break;
		case REMOVE_BE:
			re.hint--;
			break;
		default:
			INCOMPL();
		}
	}

	if (clear) {
		shardcfg_backend_clear(shardd);
		/* resume with the task following the last CLEAR */
		clear = VSTAILQ_NEXT(clear, list);
		if (clear == NULL)
			return;
	}

	/* pass 2: apply the remaining tasks in queue order */
	task = clear;
	VSTAILQ_FOREACH_FROM(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			/* cannot happen: we start after the last CLEAR */
			assert(task->task != CLEAR);
			break;
		case ADD_BE:
			b = shardcfg_backend_lookup(&re, task->priv);

			if (b == NULL) {
				assert (task->weight >= 1);
				/* clamp replicas * weight to uint32_t */
				if (replicas * task->weight > UINT32_MAX)
					b_replicas = UINT32_MAX;
				else
					b_replicas = (uint32_t) // flint
						(replicas * task->weight);

				shardcfg_backend_add(&re, task->priv,
				    b_replicas);
				break;
			}

			const char * const ident = b->ident;

			shard_notice(vsl, shardd->name,
			    "backend %s%s%s already exists - skipping",
			    VRT_BACKEND_string(b->backend),
			    ident ? "/" : "",
			    ident ? ident : "");
			break;
		case REMOVE_BE:
			shardcfg_backend_del(&re, task->priv);
			break;
		default:
			INCOMPL();
		}
	}
	shardcfg_backend_finalize(&re);
}
618
619
/*
620
 * ============================================================
621
 * top reconfiguration function
622
 */
623
624
static VCL_BOOL
625 590
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas)
626
{
627
        struct sharddir *shardd;
628
629 590
        CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
630 590
        assert (replicas > 0);
631 590
        shardd = change->shardd;
632 590
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
633
634 590
        if (VSTAILQ_FIRST(&change->tasks) == NULL)
635 190
                return (1);
636
637 400
        sharddir_wrlock(shardd);
638
639 400
        shardcfg_apply_change(ctx->vsl, shardd, change, replicas);
640 400
        shard_change_finish(change);
641
642 400
        if (shardd->hashcircle)
643 180
                free(shardd->hashcircle);
644 400
        shardd->hashcircle = NULL;
645
646 400
        if (shardd->n_backend == 0) {
647 40
                shard_err0(ctx->vsl, shardd->name,
648
                    ".reconfigure() no backends");
649 40
                sharddir_unlock(shardd);
650 40
                return (0);
651
        }
652
653 360
        shardcfg_hashcircle(shardd);
654 360
        sharddir_unlock(shardd);
655 360
        return (1);
656 590
}
657
658
VCL_BOOL
659 420
shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
660
{
661
        struct shard_change *change;
662
663 420
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
664 420
        if (replicas <= 0) {
665 20
                shard_err(ctx->vsl, shardd->name,
666
                    ".reconfigure() invalid replicas argument %ld", replicas);
667 20
                return (0);
668
        }
669
670 400
        change = shard_change_get(ctx, shardd);
671 400
        if (change == NULL)
672 0
                return (0);
673
674 400
        return (change_reconfigure(ctx, change, replicas));
675 420
}
676
677
/*
678
 * ============================================================
679
 * misc config related
680
 */
681
682
/* only for sharddir_delete() */
683
void
684 50
shardcfg_delete(const struct sharddir *shardd)
685
{
686
        unsigned i;
687
688 50
        for (i = 0; i < shardd->n_backend; i++)
689 0
                shardcfg_backend_free(&shardd->backend[i]);
690 50
        if (shardd->backend)
691 0
                free(shardd->backend);
692 50
        if (shardd->hashcircle)
693 0
                free(shardd->hashcircle);
694 50
}
695
696
/*
 * Set the director-wide warmup ratio under the write lock.
 * Precondition (asserted): 0 <= ratio < 1.
 */
VCL_VOID
shardcfg_set_warmup(struct sharddir *shardd, VCL_REAL ratio)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(ratio >= 0 && ratio < 1);
	sharddir_wrlock(shardd);
	shardd->warmup = ratio;
	sharddir_unlock(shardd);
}
705
706
/*
 * Set the director-wide default rampup duration under the write lock.
 * Precondition (asserted): duration >= 0.
 */
VCL_VOID
shardcfg_set_rampup(struct sharddir *shardd, VCL_DURATION duration)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(duration >= 0);
	sharddir_wrlock(shardd);
	shardd->rampup_duration = duration;
	sharddir_unlock(shardd);
}
715
716
VCL_DURATION
717 1620
shardcfg_get_rampup(const struct sharddir *shardd, unsigned host)
718
{
719
        VCL_DURATION r;
720
721 1620
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
722
        // assert sharddir_rdlock_held(shardd);
723 1620
        assert (host < shardd->n_backend);
724
725 1620
        if (isnan(shardd->backend[host].rampup))
726 1580
                r = shardd->rampup_duration;
727
        else
728 40
                r = shardd->backend[host].rampup;
729
730 1620
        return (r);
731
}