varnish-cache/vmod/vmod_directors_shard_dir.c
/*-
 * Copyright 2009-2016 UPLEX - Nils Goroll Systemoptimierung
 * All rights reserved.
 *
 * Authors: Nils Goroll <nils.goroll@uplex.de>
 *          Geoffrey Simmons <geoff.simmons@uplex.de>
 *          Julian Wiesener <jw@uplex.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*lint -e801 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>

#include "cache/cache.h"

#include "vbm.h"
#include "vrnd.h"

#include "vcc_directors_if.h"
#include "vmod_directors_shard_dir.h"

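/*
 * per-host info collected while walking the hash circle: hostid indexes
 * shardd->backend (UINT_MAX while unset), changed is the health-change
 * timestamp reported by VRT_Healthy()
 */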
struct shard_be_info {
        unsigned        hostid;
        unsigned        healthy;
        double          changed;        // when
};

/*
 * circle walk state for shard_next
 *
 * pick* cut off the search after having seen all possible backends
 */
struct shard_state {
        const struct vrt_ctx    *ctx;
        struct sharddir *shardd;
        uint32_t                idx;

        struct vbitmap          *picklist;
        unsigned                pickcount;

        struct shard_be_info    previous;
        struct shard_be_info    last;
};

void
sharddir_debug(struct sharddir *shardd, const uint32_t flags)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardd->debug_flags = flags;
}

void
sharddir_log(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        if (vsl != NULL)
                VSLbv(vsl, tag, fmt, ap);
        else
                VSLv(tag, NO_VXID, fmt, ap);
        va_end(ap);
}

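/*
 * binary search over the sorted hash circle: return the index of the first
 * point >= key, or the last slot (n - 1) when key is above all points
 */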
static int
shard_lookup(const struct sharddir *shardd, const uint32_t key)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

        const uint32_t n = shardd->n_points;
        uint32_t i, idx = UINT32_MAX, high = n, low = 0;

        assert (n < idx);

        do {
            i = (high + low) / 2;
            if (shardd->hashcircle[i].point == key)
                idx = i;
            else if (i == n - 1)
                idx = n - 1;
            else if (shardd->hashcircle[i].point < key &&
                     shardd->hashcircle[i+1].point >= key)
                idx = i + 1;
            else if (shardd->hashcircle[i].point > key)
                if (i == 0)
                    idx = 0;
                else
                    high = i;
            else
                low = i;
        } while (idx == UINT32_MAX);

        return (idx);
}

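/*
 * walk the hash circle clockwise from state->idx, visiting each distinct
 * host at most once (tracked in state->picklist): skip 'skip' eligible
 * hosts before choosing one, where with healthy set only hosts for which
 * VRT_Healthy() returns true are eligible.  The chosen host is recorded in
 * state->last, the host seen before it in state->previous.  Returns the
 * chosen host id, or -1 once the circle is exhausted.
 */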
static int
shard_next(struct shard_state *state, VCL_INT skip, VCL_BOOL healthy)
{
        int c, chosen = -1;
        VCL_BACKEND be;
        vtim_real changed;
        struct shard_be_info *sbe;

        AN(state);
        CHECK_OBJ_NOTNULL(state->shardd, SHARDDIR_MAGIC);

        if (state->pickcount >= state->shardd->n_backend)
                return (-1);

        while (state->pickcount < state->shardd->n_backend && skip >= 0) {

                c = state->shardd->hashcircle[state->idx].host;

                if (!vbit_test(state->picklist, c)) {

                        vbit_set(state->picklist, c);
                        state->pickcount++;

                        sbe = NULL;
                        be = state->shardd->backend[c].backend;
                        AN(be);
                        if (VRT_Healthy(state->ctx, be, &changed)) {
                                if (skip-- == 0) {
                                        chosen = c;
                                        sbe = &state->last;
                                } else {
                                        sbe = &state->previous;
                                }

                        } else if (!healthy && skip-- == 0) {
                                chosen = c;
                                sbe = &state->last;
                        }
                        if (sbe == &state->last &&
                            state->last.hostid != UINT_MAX)
                                memcpy(&state->previous, &state->last,
                                    sizeof(state->previous));

                        if (sbe) {
                                sbe->hostid = c;
                                sbe->healthy = 1;
                                sbe->changed = changed;
                        }
                        if (chosen != -1)
                                break;
                }

                if (++(state->idx) == state->shardd->n_points)
                        state->idx = 0;
        }
        return (chosen);
}

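/*
 * sharddir lifecycle: sharddir_new() allocates the director and initializes
 * its rwlock, sharddir_set_param() installs a new default parameter set,
 * sharddir_release() clears the backend configuration via
 * shardcfg_backend_clear() and sharddir_delete() frees the director again
 */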
void
sharddir_new(struct sharddir **sharddp, const char *vcl_name,
    const struct vmod_directors_shard_param *param)
{
        struct sharddir *shardd;

        AN(vcl_name);
        AN(sharddp);
        AZ(*sharddp);
        ALLOC_OBJ(shardd, SHARDDIR_MAGIC);
        AN(shardd);
        *sharddp = shardd;
        shardd->name = vcl_name;
        shardd->param = param;
        PTOK(pthread_rwlock_init(&shardd->mtx, NULL));
}

void
sharddir_set_param(struct sharddir *shardd,
    const struct vmod_directors_shard_param *param)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardd->param = param;
}

void
sharddir_release(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardcfg_backend_clear(shardd);
}

void
sharddir_delete(struct sharddir **sharddp)
{
        struct sharddir *shardd;

        TAKE_OBJ_NOTNULL(shardd, sharddp, SHARDDIR_MAGIC);
        shardcfg_delete(shardd);
        PTOK(pthread_rwlock_destroy(&shardd->mtx));
        FREE_OBJ(shardd);
}

void
sharddir_rdlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_rdlock(&shardd->mtx));
}

void
sharddir_wrlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_wrlock(&shardd->mtx));
}

void
sharddir_unlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_unlock(&shardd->mtx));
}

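/*
 * clamp the alt argument to the valid range [0, n_backend - 1] and log
 * when it had to be adjusted
 */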
static inline void
validate_alt(VRT_CTX, const struct sharddir *shardd, VCL_INT *alt)
{
        const VCL_INT alt_max = shardd->n_backend - 1;

        if (*alt < 0) {
                shard_err(ctx->vsl, shardd->name,
                    "invalid negative parameter alt=%ld, set to 0", *alt);
                *alt = 0;
        } else if (*alt > alt_max) {
                shard_err(ctx->vsl, shardd->name,
                    "parameter alt=%ld limited to %ld", *alt, alt_max);
                *alt = alt_max;
        }
}

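/*
 * prepare the circle walk state for one resolution: idx and the hostids
 * start out as sentinels, picklist is the caller-provided bitmap of hosts
 * already visited
 */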
static inline void
init_state(struct shard_state *state,
    VRT_CTX, struct sharddir *shardd, struct vbitmap *picklist)
{
        AN(picklist);

        state->ctx = ctx;
        state->shardd = shardd;
        state->idx = UINT32_MAX;
        state->picklist = picklist;

        /* healthy and changed are only defined for valid hostids */
        state->previous.hostid = UINT_MAX;
        state->last.hostid = UINT_MAX;
}

/* basically the same as vdir_any_healthy
 * - XXX we should embed a vdir
 * - XXX should we return the health state of the actual backend
 *   for healthy=IGNORE ?
 */
VCL_BOOL
sharddir_any_healthy(VRT_CTX, struct sharddir *shardd, VCL_TIME *changed)
{
        unsigned i, retval = 0;
        VCL_BACKEND be;
        vtim_real c;

        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        sharddir_rdlock(shardd);
        if (changed != NULL)
                *changed = 0;
        for (i = 0; i < shardd->n_backend; i++) {
                be = shardd->backend[i].backend;
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
                retval = VRT_Healthy(ctx, be, &c);
                if (changed != NULL && c > *changed)
                        *changed = c;
                if (retval)
                        break;
        }
        sharddir_unlock(shardd);
        return (retval);
}

/*
 * core function for the director backend/resolve method
 */

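/*
 * resolution works in two steps: shard_lookup() finds the circle slot for
 * key, then shard_next() walks clockwise to the alt-th eligible host
 * (eligibility depending on the healthy enum).  If the walk exhausts the
 * circle, the last host seen (state->previous) is used as a fallback, or
 * NULL is returned if there is none.  With rampup/warmup enabled, requests
 * may spill over to the following host: while the chosen host is still
 * inside its rampup interval, the spill probability shrinks as the rampup
 * progresses; otherwise the next host receives a 'warmup' fraction of the
 * traffic.
 */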
static VCL_BACKEND
sharddir_pick_be_locked(VRT_CTX, const struct sharddir *shardd, uint32_t key,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy,
    struct shard_state *state)
{
        VCL_BACKEND be;
        VCL_DURATION chosen_r, alt_r;

        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(ctx->vsl);
        assert(shardd->n_backend > 0);

        assert(shardd->hashcircle);

        validate_alt(ctx, shardd, &alt);

        state->idx = shard_lookup(shardd, key);
        assert(state->idx < UINT32_MAX);

        SHDBG(SHDBG_LOOKUP, shardd, "lookup key %x idx %u host %u",
            key, state->idx, shardd->hashcircle[state->idx].host);

        if (alt > 0) {
                if (shard_next(state, alt - 1,
                    healthy == VENUM(ALL) ? 1 : 0) == -1) {
                        if (state->previous.hostid != UINT_MAX) {
                                be = sharddir_backend(shardd,
                                    state->previous.hostid);
                                AN(be);
                                return (be);
                        }
                        return (NULL);
                }
        }

        if (shard_next(state, 0, healthy == VENUM(IGNORE) ? 0 : 1) == -1) {
                if (state->previous.hostid != UINT_MAX) {
                        be = sharddir_backend(shardd, state->previous.hostid);
                        AN(be);
                        return (be);
                }
                return (NULL);
        }

        be = sharddir_backend(shardd, state->last.hostid);
        AN(be);

        if (warmup == -1)
                warmup = shardd->warmup;

        /* short path for cases where we don't want rampup/warmup or can't */
        if (alt > 0 || healthy == VENUM(IGNORE) || (!rampup && warmup == 0) ||
            shard_next(state, 0, 1) == -1)
                return (be);

        assert(alt == 0);
        assert(state->previous.hostid != UINT_MAX);
        assert(state->last.hostid != UINT_MAX);
        assert(state->previous.hostid != state->last.hostid);
        assert(be == sharddir_backend(shardd, state->previous.hostid));

        chosen_r = shardcfg_get_rampup(shardd, state->previous.hostid);
        alt_r = shardcfg_get_rampup(shardd, state->last.hostid);

        SHDBG(SHDBG_RAMPWARM, shardd, "chosen host %u rampup %f changed %f",
            state->previous.hostid, chosen_r,
            ctx->now - state->previous.changed);
        SHDBG(SHDBG_RAMPWARM, shardd, "alt host %u rampup %f changed %f",
            state->last.hostid, alt_r,
            ctx->now - state->last.changed);

        if (ctx->now - state->previous.changed < chosen_r) {
                /*
                 * chosen host is in rampup
                 * - no change if alternative host is also in rampup or the dice
                 *   has rolled in favour of the chosen host
                 */
                if (!rampup ||
                    ctx->now - state->last.changed < alt_r ||
                    VRND_RandomTestableDouble() * chosen_r <
                    (ctx->now - state->previous.changed))
                        return (be);
        } else {
                /* chosen host not in rampup - warmup ? */
                if (warmup == 0 || VRND_RandomTestableDouble() > warmup)
                        return (be);
        }

        be = sharddir_backend(shardd, state->last.hostid);
        return (be);
}

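/*
 * public entry point: take the read lock, allocate the picklist bitmap in a
 * stack VLA sized for the current backend count and delegate to
 * sharddir_pick_be_locked().
 *
 * Illustrative VCL sketch (not part of this file, assuming the usual
 * vmod_directors shard setup) of how a resolution ends up here:
 *
 *     import directors;
 *
 *     sub vcl_init {
 *         new vd = directors.shard();
 *         vd.add_backend(b1);
 *         vd.add_backend(b2);
 *         vd.reconfigure();
 *     }
 *
 *     sub vcl_backend_fetch {
 *         set bereq.backend = vd.backend(by=URL);
 *     }
 */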
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy)
{
        VCL_BACKEND be;
        struct shard_state state[1];
        unsigned picklist_sz;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

        sharddir_rdlock(shardd);

        if (shardd->n_backend == 0) {
                shard_err0(ctx->vsl, shardd->name, "no backends");
                sharddir_unlock(shardd);
                return (NULL);
        }

        picklist_sz = VBITMAP_SZ(shardd->n_backend);
        char picklist_spc[picklist_sz];

        memset(state, 0, sizeof(state));
        init_state(state, ctx, shardd, vbit_init(picklist_spc, picklist_sz));

        be = sharddir_pick_be_locked(ctx, shardd, key, alt, warmup, rampup,
            healthy, state);
        sharddir_unlock(shardd);

        vbit_destroy(state->picklist);
        return (be);
}