/*-
 * Copyright 2009-2016 UPLEX - Nils Goroll Systemoptimierung
 * All rights reserved.
 *
 * Authors: Nils Goroll <nils.goroll@uplex.de>
 *          Geoffrey Simmons <geoff@uplex.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "cache/cache.h"

#include "vmod_directors_shard_dir.h"
#include "vmod_directors_shard_cfg.h"

/*lint -esym(749, shard_change_task_e::*) */
enum shard_change_task_e {
	_SHARD_TASK_E_INVALID = 0,
	CLEAR,
	ADD_BE,
	REMOVE_BE,
	_SHARD_TASK_E_MAX
};

struct shard_change_task {
	unsigned			magic;
#define SHARD_CHANGE_TASK_MAGIC		0x1e1168af
	enum shard_change_task_e	task;
	void				*priv;
	VCL_REAL			weight;
	VSTAILQ_ENTRY(shard_change_task) list;
};

struct shard_change {
	unsigned			magic;
#define SHARD_CHANGE_MAGIC		0xdff5c9a6
	struct vsl_log			*vsl;
	struct sharddir			*shardd;
	VSTAILQ_HEAD(,shard_change_task) tasks;
};

struct backend_reconfig {
	struct sharddir * const shardd;
	unsigned	hint;	// on number of backends after reconfig
	unsigned	hole_n;	// number of holes in backends array
	unsigned	hole_i;	// index hint on first hole
};
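
/*
 * Note on holes: deleting a backend NULLs its slot in the backends array
 * instead of shifting the rest. hole_n counts such empty slots and hole_i
 * is a search hint for the first one, so additions can refill holes and
 * shardcfg_backend_finalize() can compact the array when a change is done.
 */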

/* forward decl */
static VCL_BOOL
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas);

/*
 * ============================================================
 * change / task list
 *
 * For backend reconfiguration, we create a change list on the VCL workspace
 * in a PRIV_TASK state, which we then apply in reconfigure.
 */
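
/*
 * The VCL methods .add_backend(), .remove_backend() and .clear() only queue
 * tasks on this list; nothing takes effect until .reconfigure() applies
 * them. If a VCL task ends with changes still pending, the PRIV_TASK fini
 * callback (shard_change_fini below) applies them with the default replica
 * count of 67.
 *
 * Minimal VCL sketch (backend names hypothetical):
 *
 *	sub vcl_init {
 *		new dir = directors.shard();
 *		dir.add_backend(be1);
 *		dir.add_backend(be2, ident = "b2", rampup = 30s);
 *		dir.reconfigure(replicas = 67);
 *	}
 */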

static void v_matchproto_(vmod_priv_fini_f)
shard_change_fini(VRT_CTX, void * priv)
{
	struct shard_change *change;

	if (priv == NULL)
		return;

	CAST_OBJ_NOTNULL(change, priv, SHARD_CHANGE_MAGIC);

	/* apply pending changes with the default number of replicas (67) */
	(void) change_reconfigure(ctx, change, 67);
}

static const struct vmod_priv_methods shard_change_priv_methods[1] = {{
	.magic = VMOD_PRIV_METHODS_MAGIC,
	.type = "vmod_directors_shard_cfg",
	.fini = shard_change_fini
}};

static struct shard_change *
shard_change_get(VRT_CTX, struct sharddir * const shardd)
{
	struct vmod_priv *task;
	struct shard_change *change;
	const void *id = (const char *)shardd + task_off_cfg;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	task = VRT_priv_task(ctx, id);
	if (task == NULL) {
		shard_fail(ctx, shardd->name, "%s", "no priv_task");
		return (NULL);
	}

	if (task->priv != NULL) {
		CAST_OBJ_NOTNULL(change, task->priv, SHARD_CHANGE_MAGIC);
		assert (change->vsl == ctx->vsl);
		assert (change->shardd == shardd);
		return (change);
	}

	WS_TASK_ALLOC_OBJ(ctx, change, SHARD_CHANGE_MAGIC);
	if (change == NULL)
		return (NULL);
	change->vsl = ctx->vsl;
	change->shardd = shardd;
	VSTAILQ_INIT(&change->tasks);
	task->priv = change;
	task->methods = shard_change_priv_methods;

	return (change);
}

static void
shard_change_finish(struct shard_change *change)
{
	CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);

	VSTAILQ_INIT(&change->tasks);
}

static struct shard_change_task *
shard_change_task_add(VRT_CTX, struct shard_change *change,
    enum shard_change_task_e task_e, void *priv)
{
	struct shard_change_task *task;

	CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);

	WS_TASK_ALLOC_OBJ(ctx, task, SHARD_CHANGE_TASK_MAGIC);
	if (task == NULL)
		return (NULL);
	task->task = task_e;
	task->priv = priv;
	VSTAILQ_INSERT_TAIL(&change->tasks, task, list);

	return (task);
}

static inline struct shard_change_task *
shard_change_task_backend(VRT_CTX, struct sharddir *shardd,
    enum shard_change_task_e task_e, VCL_BACKEND be, VCL_STRING ident,
    VCL_DURATION rampup)
{
	struct shard_change *change;
	struct shard_backend *b;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(task_e == ADD_BE || task_e == REMOVE_BE);

	change = shard_change_get(ctx, shardd);
	if (change == NULL)
		return (NULL);

	b = WS_Alloc(ctx->ws, sizeof(*b));
	if (b == NULL) {
		shard_fail(ctx, change->shardd->name, "%s",
		    "could not get workspace for task");
		return (NULL);
	}

	/* hold a reference on the backend for the lifetime of the task */
	b->backend = NULL;
	VRT_Assign_Backend(&b->backend, be);
	b->ident = ident != NULL && *ident != '\0' ? ident : NULL;
	b->rampup = rampup;

	return (shard_change_task_add(ctx, change, task_e, b));
}

/*
 * ============================================================
 * director reconfiguration tasks
 */
VCL_BOOL
shardcfg_add_backend(VRT_CTX, struct sharddir *shardd,
    VCL_BACKEND be, VCL_STRING ident, VCL_DURATION rampup, VCL_REAL weight)
{
	struct shard_change_task *task;

	assert (weight >= 1);
	AN(be);

	task = shard_change_task_backend(ctx, shardd, ADD_BE,
	    be, ident, rampup);

	if (task == NULL)
		return (0);

	task->weight = weight;
	return (1);
}

VCL_BOOL
shardcfg_remove_backend(VRT_CTX, struct sharddir *shardd,
    VCL_BACKEND be, VCL_STRING ident)
{
	return (shard_change_task_backend(ctx, shardd, REMOVE_BE,
	    be, ident, 0) != NULL);
}

VCL_BOOL
shardcfg_clear(VRT_CTX, struct sharddir *shardd)
{
	struct shard_change *change;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

	change = shard_change_get(ctx, shardd);
	if (change == NULL)
		return (0);

	return (shard_change_task_add(ctx, change, CLEAR, NULL) != NULL);
}

/*
 * ============================================================
 * consistent hashing circle init
 */
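
/*
 * Construction sketch: each backend contributes min(replicas, rmax) points,
 * with rmax chosen so that the total stays below UINT32_MAX. Point j of a
 * backend is the 32-bit hash of its ident concatenated with the decimal
 * string "j"; all points are then sorted by hash value. The lookup side
 * (in vmod_directors_shard_dir.c) walks this sorted circle to map a key's
 * hash to a host.
 */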

typedef int (*compar)(const void *, const void *);

static int
circlepoint_compare(const struct shard_circlepoint *a,
    const struct shard_circlepoint *b)
{
	return ((a->point == b->point) ? 0 : ((a->point > b->point) ? 1 : -1));
}

static void
shardcfg_hashcircle(struct sharddir *shardd)
{
	const struct shard_backend *backends, *b;
	unsigned h;
	uint32_t i, j, n_points, r, rmax;
	const char *ident;
	const int len = 12; // log10(UINT32_MAX) + 2;
	char s[len];

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	AZ(shardd->hashcircle);

	assert(shardd->n_backend > 0);
	backends = shardd->backend;
	AN(backends);

	/* cap the points per backend such that n_points stays < UINT32_MAX */
	n_points = 0;
	rmax = (UINT32_MAX - 1) / shardd->n_backend;
	for (b = backends; b < backends + shardd->n_backend; b++) {
		CHECK_OBJ_NOTNULL(b->backend, DIRECTOR_MAGIC);
		n_points += vmin_t(uint32_t, b->replicas, rmax);
	}

	assert(n_points < UINT32_MAX);

	shardd->n_points = n_points;
	shardd->hashcircle = calloc(n_points, sizeof(struct shard_circlepoint));
	AN(shardd->hashcircle);

	i = 0;
	for (h = 0, b = backends; h < shardd->n_backend; h++, b++) {
		ident = b->ident ? b->ident : VRT_BACKEND_string(b->backend);

		AN(ident);
		assert(ident[0] != '\0');

		r = vmin_t(uint32_t, b->replicas, rmax);

		for (j = 0; j < r; j++) {
			assert(snprintf(s, len, "%d", j) < len);
			assert (i < n_points);
			shardd->hashcircle[i].point =
			    VRT_HashStrands32(TOSTRANDS(2, ident, s));
			shardd->hashcircle[i].host = h;
			i++;
		}
	}
	assert (i == n_points);
	qsort((void *)shardd->hashcircle, n_points,
	    sizeof (struct shard_circlepoint), (compar)circlepoint_compare);

	if ((shardd->debug_flags & SHDBG_CIRCLE) == 0)
		return;

	for (i = 0; i < n_points; i++)
		SHDBG(SHDBG_CIRCLE, shardd,
		    "hashcircle[%5jd] = {point = %8x, host = %2u}\n",
		    (intmax_t)i, shardd->hashcircle[i].point,
		    shardd->hashcircle[i].host);
}

/*
 * ============================================================
 * configure the director backends
 */

static void
shardcfg_backend_free(struct shard_backend *f)
{
	if (f->freeptr)
		free(f->freeptr);
	VRT_Assign_Backend(&f->backend, NULL);
	memset(f, 0, sizeof(*f));
}

static void
shardcfg_backend_copyin(struct shard_backend *dst,
    const struct shard_backend *src)
{
	dst->backend = src->backend;
	dst->ident = src->ident ? strdup(src->ident) : NULL;
	dst->rampup = src->rampup;
}

static int
shardcfg_backend_cmp(const struct shard_backend *a,
    const struct shard_backend *b)
{
	const char *ai, *bi;

	ai = a->ident;
	bi = b->ident;

	assert(ai || a->backend);
	assert(bi || b->backend);

	/* vcl_names are unique, so we can compare the backend pointers */
	if (ai == NULL && bi == NULL)
		return (a->backend != b->backend);

	if (ai == NULL)
		ai = VRT_BACKEND_string(a->backend);

	if (bi == NULL)
		bi = VRT_BACKEND_string(b->backend);

	AN(ai);
	AN(bi);
	return (strcmp(ai, bi));
}

/* for removal, we delete all instances if the backend matches */
static int
shardcfg_backend_del_cmp(const struct shard_backend *task,
    const struct shard_backend *b)
{
	assert(task->backend || task->ident);

	if (task->ident == NULL)
		return (task->backend != b->backend);

	return (shardcfg_backend_cmp(task, b));
}

static const struct shard_backend *
shardcfg_backend_lookup(const struct backend_reconfig *re,
    const struct shard_backend *b)
{
	unsigned i, max = re->shardd->n_backend + re->hole_n;
	const struct shard_backend *bb = re->shardd->backend;

	if (max > 0)
		AN(bb);

	for (i = 0; i < max; i++) {
		if (bb[i].backend == NULL)
			continue;	// hole
		if (!shardcfg_backend_cmp(b, &bb[i]))
			return (&bb[i]);
	}
	return (NULL);
}

static void
shardcfg_backend_expand(const struct backend_reconfig *re)
{
	unsigned min = re->hint;

	CHECK_OBJ_NOTNULL(re->shardd, SHARDDIR_MAGIC);

	min = vmax_t(unsigned, min, 16);

	if (re->shardd->l_backend < min)
		re->shardd->l_backend = min;
	else
		re->shardd->l_backend *= 2;

	re->shardd->backend = realloc(re->shardd->backend,
	    re->shardd->l_backend * sizeof *re->shardd->backend);

	AN(re->shardd->backend);
}

static void
shardcfg_backend_add(struct backend_reconfig *re,
    const struct shard_backend *b, uint32_t replicas)
{
	unsigned i;
	struct shard_backend *bb = re->shardd->backend;

	if (re->hole_n == 0) {
		if (re->shardd->n_backend >= re->shardd->l_backend) {
			shardcfg_backend_expand(re);
			bb = re->shardd->backend;
		}
		assert(re->shardd->n_backend < re->shardd->l_backend);
		i = re->shardd->n_backend;
	} else {
		/* reuse a hole: advance hole_i to the next empty slot */
		assert(re->hole_i != UINT_MAX);
		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) < re->shardd->n_backend + re->hole_n);
		assert(re->hole_i < re->shardd->n_backend + re->hole_n);

		i = (re->hole_i)++;
		(re->hole_n)--;
	}

	re->shardd->n_backend++;
	shardcfg_backend_copyin(&bb[i], b);
	bb[i].replicas = replicas;
}

void
shardcfg_backend_clear(struct sharddir *shardd)
{
	unsigned i;
	for (i = 0; i < shardd->n_backend; i++)
		shardcfg_backend_free(&shardd->backend[i]);
	shardd->n_backend = 0;
}

static void
shardcfg_backend_del(struct backend_reconfig *re, struct shard_backend *spec)
{
	unsigned i, max = re->shardd->n_backend + re->hole_n;
	struct shard_backend * const bb = re->shardd->backend;

	for (i = 0; i < max; i++) {
		if (bb[i].backend == NULL)
			continue;	// hole
		if (shardcfg_backend_del_cmp(spec, &bb[i]))
			continue;

		shardcfg_backend_free(&bb[i]);
		re->shardd->n_backend--;
		if (i < re->shardd->n_backend + re->hole_n) {
			(re->hole_n)++;
			re->hole_i = vmin(re->hole_i, i);
		}
	}
	/* release the task's reference on the backend */
	VRT_Assign_Backend(&spec->backend, NULL);
}

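/*
 * Compaction sketch: repeatedly trim holes off the end of the array, then
 * move the last live entry into the first remaining hole, until hole_n
 * reaches zero and the backends occupy [0, n_backend) densely.
 */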
static void
shardcfg_backend_finalize(struct backend_reconfig *re)
{
	unsigned i;
	struct shard_backend * const bb = re->shardd->backend;

	while (re->hole_n > 0) {
		// trim end
		i = re->shardd->n_backend + re->hole_n - 1;
		while (re->hole_n && bb[i].backend == NULL) {
			(re->hole_n)--;
			i--;
		}

		if (re->hole_n == 0)
			break;

		assert(re->hole_i < i);

		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) <= i);

		assert(re->hole_i < i);
		assert(bb[re->hole_i].backend == NULL);
		assert(bb[i].backend != NULL);

		memcpy(&bb[re->hole_i], &bb[i], sizeof(*bb));
		memset(&bb[i], 0, sizeof(*bb));

		(re->hole_n)--;
		(re->hole_i)++;
	}

	assert(re->hole_n == 0);
}

/*
 * ============================================================
 * work the change tasks
 */
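
/*
 * shardcfg_apply_change() works the task list in two passes: the first
 * computes a sizing hint for the backends array and remembers the last
 * CLEAR task; the second clears the director if requested and then applies
 * only the tasks queued after that last CLEAR (or all of them if there was
 * none).
 */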

static void
shardcfg_apply_change(struct vsl_log *vsl, struct sharddir *shardd,
    const struct shard_change *change, VCL_INT replicas)
{
	struct shard_change_task *task, *clear;
	const struct shard_backend *b;
	uint32_t b_replicas;

	struct backend_reconfig re = {
		.shardd = shardd,
		.hint = shardd->n_backend,
		.hole_n = 0,
		.hole_i = UINT_MAX
	};

	// XXX assert sharddir_locked(shardd)

	/* first pass: compute the sizing hint, remember the last CLEAR */
	clear = NULL;
	VSTAILQ_FOREACH(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			clear = task;
			re.hint = 0;
			break;
		case ADD_BE:
			re.hint++;
			break;
		case REMOVE_BE:
			break;
		default:
			INCOMPL();
		}
	}

	if (clear) {
		shardcfg_backend_clear(shardd);
		clear = VSTAILQ_NEXT(clear, list);
		if (clear == NULL)
			return;
	}

	/* second pass: apply the tasks queued after the last CLEAR, if any */
	task = clear;
	VSTAILQ_FOREACH_FROM(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			assert(task->task != CLEAR);
			break;
		case ADD_BE:
			b = shardcfg_backend_lookup(&re, task->priv);

			if (b == NULL) {
				assert (task->weight >= 1);
				if (replicas * task->weight > UINT32_MAX)
					b_replicas = UINT32_MAX;
				else
					b_replicas = (uint32_t) // flint
					    (replicas * task->weight);

				shardcfg_backend_add(&re, task->priv,
				    b_replicas);
				break;
			}

			const char * const ident = b->ident;

			shard_notice(vsl, shardd->name,
			    "backend %s%s%s already exists - skipping",
			    VRT_BACKEND_string(b->backend),
			    ident ? "/" : "",
			    ident ? ident : "");
			break;
		case REMOVE_BE:
			shardcfg_backend_del(&re, task->priv);
			break;
		default:
			INCOMPL();
		}
	}
	shardcfg_backend_finalize(&re);
}

/*
 * ============================================================
 * top reconfiguration function
 */
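
/*
 * change_reconfigure() runs under the write lock: it applies the queued
 * changes, frees the old hash circle and rebuilds it once per call. An
 * empty task list is a successful no-op; ending up with zero backends is
 * logged as an error and reported as failure.
 */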

static VCL_BOOL
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas)
{
	struct sharddir *shardd;

	CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
	assert (replicas > 0);
	shardd = change->shardd;
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

	if (VSTAILQ_FIRST(&change->tasks) == NULL)
		return (1);

	sharddir_wrlock(shardd);

	shardcfg_apply_change(ctx->vsl, shardd, change, replicas);
	shard_change_finish(change);

	if (shardd->hashcircle)
		free(shardd->hashcircle);
	shardd->hashcircle = NULL;

	if (shardd->n_backend == 0) {
		shard_err0(ctx->vsl, shardd->name,
		    ".reconfigure() no backends");
		sharddir_unlock(shardd);
		return (0);
	}

	shardcfg_hashcircle(shardd);
	sharddir_unlock(shardd);
	return (1);
}

VCL_BOOL
shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
{
	struct shard_change *change;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	if (replicas <= 0) {
		shard_err(ctx->vsl, shardd->name,
		    ".reconfigure() invalid replicas argument %ld", replicas);
		return (0);
	}

	change = shard_change_get(ctx, shardd);
	if (change == NULL)
		return (0);

	return (change_reconfigure(ctx, change, replicas));
}

/*
 * ============================================================
 * misc config related
 */

/* only for sharddir_delete() */
void
shardcfg_delete(const struct sharddir *shardd)
{

	AZ(shardd->n_backend);
	if (shardd->backend)
		free(shardd->backend);
	if (shardd->hashcircle)
		free(shardd->hashcircle);
}

VCL_VOID
shardcfg_set_warmup(struct sharddir *shardd, VCL_REAL ratio)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(ratio >= 0 && ratio < 1);
	sharddir_wrlock(shardd);
	shardd->warmup = ratio;
	sharddir_unlock(shardd);
}

VCL_VOID
shardcfg_set_rampup(struct sharddir *shardd, VCL_DURATION duration)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(duration >= 0);
	sharddir_wrlock(shardd);
	shardd->rampup_duration = duration;
	sharddir_unlock(shardd);
}
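
/*
 * A per-backend rampup of NAN means no value was configured for that
 * backend, in which case the director-wide rampup_duration applies.
 */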

VCL_DURATION
shardcfg_get_rampup(const struct sharddir *shardd, unsigned host)
{
	VCL_DURATION r;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	// assert sharddir_rdlock_held(shardd);
	assert (host < shardd->n_backend);

	if (isnan(shardd->backend[host].rampup))
		r = shardd->rampup_duration;
	else
		r = shardd->backend[host].rampup;

	return (r);
}