/*-
 * Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
 * All rights reserved.
 *
 * Authors: Julian Wiesener <jw@uplex.de>
 *          Nils Goroll <slink@uplex.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>
#include <string.h>

#include "cache/cache.h"
#include "vcl.h"

#include "vend.h"

#include "vcc_directors_if.h"
#include "vmod_directors_shard_dir.h"
#include "vmod_directors_shard_cfg.h"
#include "vsb.h"

/* -------------------------------------------------------------------------
 * shard director: LAZY mode (vdi resolve function), parameter objects
 *
 * By associating a parameter object with a shard director, we enable LAZY
 * lookups as with the other directors. Parameter objects are defined with VCL
 * scope (normal vmod objects), but can be overridden per backend request using
 * a task priv.
 *
 * We use the same concept to carry shard.backend() parameters to vdi resolve
 * for LAZY mode: They get saved in a per-director task scope parameter object.
 *
 * Each object points to another object providing defaults for values which are
 * not defined.
 *
 * Actual resolution of the various parameter objects does not happen before
 * they are used, which enables changing them independently (ie, shard
 * .backend() parameters have precedence over an associated parameter object,
 * which by itself can be overridden).
 *
 * Overview of parameter objects (pointers are alternatives)
 *
 * shard() director        shard_param() object      default param
 *
 *               ----------------------------------> vmod static
 *    VCL obj   /                                 ->
 *    .param  -+---------> VCL obj               / _
 *                         .default  ----------- /|
 *                                               /
 *                            ^                 /
 *                            |                /
 *                                            /
 *                         .default          /
 *          -------------> TASK priv        /
 *         /                                /
 *    .default  ---------------------------
 *    TASK priv
 */
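
/*
 * Example (VCL sketch; object names are illustrative, not from this file)
 * of the override chain described above:
 *
 *      sub vcl_init {
 *              new p = directors.shard_param();
 *              new d = directors.shard();
 *              d.associate(p.use());   # p provides VCL-scope defaults for d
 *      }
 *
 *      sub vcl_backend_fetch {
 *              p.set(by=KEY, key=1);   # task-scope override of p
 *              set bereq.backend = d.backend(resolve=LAZY, alt=1);
 *      }
 */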

/* -------------------------------------------------------------------------
 * method arguments and set parameters bitmask in vmod_directors_shard_param
 */

#define arg_by          ((uint32_t)1)
#define arg_key         ((uint32_t)1 << 1)
#define arg_key_blob    ((uint32_t)1 << 2)
#define arg_alt         ((uint32_t)1 << 3)
#define arg_warmup      ((uint32_t)1 << 4)
#define arg_rampup      ((uint32_t)1 << 5)
#define arg_healthy     ((uint32_t)1 << 6)
#define arg_param       ((uint32_t)1 << 7)
#define arg_resolve     ((uint32_t)1 << 8)
#define arg_mask_       ((arg_resolve << 1) - 1)
/* allowed in shard_param.set */
#define arg_mask_set_   (arg_param - 1)
/* allowed in shard_param */
#define arg_mask_param_ ( arg_mask_set_         \
                          & ~arg_key            \
                          & ~arg_key_blob )
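
/*
 * Concretely: arg_mask_ == 0x1ff (all arguments), arg_mask_set_ == 0x7f
 * (everything below arg_param) and arg_mask_param_ == 0x79; a parameter
 * object never carries the key/key_blob bits, the key value travels with
 * the arg_by bit instead (see shard_param_merge()).
 */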

/* -------------------------------------------------------------------------
 * shard parameters - declaration & defaults
 */
enum vmod_directors_shard_param_scope {
        _SCOPE_INVALID = 0,
        SCOPE_VMOD,
        SCOPE_VCL,
        SCOPE_TASK,
        SCOPE_STACK
};

struct vmod_directors_shard_param;

#define VMOD_SHARD_SHARD_PARAM_BLOB     0xdf5ca116

struct vmod_directors_shard_param {
        unsigned                                magic;
#define VMOD_SHARD_SHARD_PARAM_MAGIC            0xdf5ca117

        /* internals */
        uint32_t                                key;
        const char                              *vcl_name;
        const struct vmod_directors_shard_param *defaults;
        enum vmod_directors_shard_param_scope   scope;

        /* parameters */
        VCL_ENUM                                by;
        VCL_ENUM                                healthy;
        uint32_t                                mask;
        VCL_BOOL                                rampup;
        VCL_INT                                 alt;
        VCL_REAL                                warmup;
};

static const struct vmod_directors_shard_param shard_param_default = {
        .magic          = VMOD_SHARD_SHARD_PARAM_MAGIC,

        .key            = 0,
        .vcl_name       = "builtin defaults",
        .defaults       = NULL,
        .scope          = SCOPE_VMOD,

        .mask           = arg_mask_param_,
        .rampup         = 1,
        .alt            = 0,
        .warmup         = -1,
};
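
/*
 * Note on the defaults above: .mask == arg_mask_param_ makes the builtin
 * defaults terminate the merge recursion in shard_param_merge(), and
 * .warmup == -1 is the sentinel for "use the director-wide warmup
 * probability" (cf. .set_warmup()).
 */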

#define default_by(ptr) (ptr == NULL ? VENUM(HASH) : ptr)
#define default_healthy(ptr) (ptr == NULL ? VENUM(CHOSEN) : ptr)

static struct vmod_directors_shard_param *
shard_param_stack(struct vmod_directors_shard_param *p,
    const struct vmod_directors_shard_param *pa, const char *who);

static const struct vmod_directors_shard_param *
shard_param_task_r(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa);

static struct vmod_directors_shard_param *
shard_param_task_l(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa);

static const struct vmod_directors_shard_param *
shard_param_blob(VCL_BLOB blob);

static const struct vmod_directors_shard_param *
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *p,
    struct vmod_directors_shard_param *pstk);

// XXX #3329 #3330 revisit - for now, treat pipe like backend
#define SHARD_VCL_TASK_REQ (VCL_MET_TASK_C & ~VCL_MET_PIPE)
#define SHARD_VCL_TASK_BEREQ (VCL_MET_TASK_B | VCL_MET_PIPE)
/* -------------------------------------------------------------------------
 * shard vmod interface
 */
static vdi_healthy_f vmod_shard_healthy;
static vdi_resolve_f vmod_shard_resolve;
static vdi_list_f vmod_shard_list;

struct vmod_directors_shard {
        unsigned        magic;
#define VMOD_SHARD_SHARD_MAGIC  0x6e63e1bf
        struct sharddir *shardd;
        VCL_BACKEND     dir;
};

static void
shard__assert(void)
{
        VCL_INT t1;
        uint32_t t2a, t2b;

        /* we put our uint32 key in a VCL_INT container */
        assert(sizeof(VCL_INT) >= sizeof(uint32_t));
        t2a = UINT32_MAX;
        t1 = (VCL_INT)t2a;
        t2b = (uint32_t)t1;
        assert(t2a == t2b);
}

static void v_matchproto_(vdi_release_f)
vmod_shard_release(VCL_BACKEND dir)
{
        struct sharddir *shardd;

        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        sharddir_release(shardd);
}

static void v_matchproto_(vdi_destroy_f)
vmod_shard_destroy(VCL_BACKEND dir)
{
        struct sharddir *shardd;

        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        sharddir_delete(&shardd);
}

static const struct vdi_methods vmod_shard_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "shard",
        .resolve =      vmod_shard_resolve,
        .healthy =      vmod_shard_healthy,
        .release =      vmod_shard_release,
        .destroy =      vmod_shard_destroy,
        .list =         vmod_shard_list
}};


VCL_VOID v_matchproto_(td_directors_shard__init)
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
    const char *vcl_name)
{
        struct vmod_directors_shard *vshard;

        shard__assert();

        AN(vshardp);
        AZ(*vshardp);
        ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
        AN(vshard);

        *vshardp = vshard;
        sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);

        vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
            "%s", vcl_name);
}

VCL_VOID v_matchproto_(td_directors_shard__fini)
vmod_shard__fini(struct vmod_directors_shard **vshardp)
{
        struct vmod_directors_shard *vshard;

        TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
        VRT_DelDirector(&vshard->dir);
        FREE_OBJ(vshard);
}

VCL_INT v_matchproto_(td_directors_shard_key)
vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, VCL_STRANDS s)
{

        (void)ctx;
        (void)vshard;

        return ((VCL_INT)VRT_HashStrands32(s));
}

VCL_VOID v_matchproto_(td_directors_shard_set_warmup)
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_REAL probability)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        if (probability < 0 || probability >= 1) {
                shard_notice(ctx->vsl, vshard->shardd->name,
                    ".set_warmup(%f) ignored", probability);
                return;
        }
        shardcfg_set_warmup(vshard->shardd, probability);
}

VCL_VOID v_matchproto_(td_directors_shard_set_rampup)
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_DURATION duration)
{
        (void)ctx;
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardcfg_set_rampup(vshard->shardd, duration);
}

VCL_VOID v_matchproto_(td_directors_shard_associate)
vmod_shard_associate(VRT_CTX,
    struct vmod_directors_shard *vshard, VCL_BLOB b)
{
        const struct vmod_directors_shard_param *ppt;
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (b == NULL) {
                sharddir_set_param(vshard->shardd, &shard_param_default);
                return;
        }

        ppt = shard_param_blob(b);

        if (ppt == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    "shard .associate param invalid");
                return;
        }

        sharddir_set_param(vshard->shardd, ppt);
}

VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_add_backend) *args)
{
        VCL_REAL weight = 1;

        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (args->backend == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    "None backend cannot be added");
                return (0);
        }

        if (args->valid_weight) {
                if (args->weight >= 1)
                        weight = args->weight;
                else
                        shard_notice(ctx->vsl, vshard->shardd->name,
                            ".add_backend(weight=%f) ignored", args->weight);
        }

        return (shardcfg_add_backend(ctx, vshard->shardd, args->backend,
            args->valid_ident ? args->ident : NULL,
            args->valid_rampup ? args->rampup : nan(""),
            weight));
}

VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_remove_backend) *args)
{
        VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
        VCL_STRING ident = args->valid_ident ? args->ident : NULL;

        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        if (be == NULL && ident == NULL) {
                shard_fail(ctx, vshard->shardd->name, "%s",
                    ".remove_backend(): either backend or ident are required");
                return (0);
        }

        return (shardcfg_remove_backend(ctx, vshard->shardd, be, ident));
}

VCL_BOOL v_matchproto_(td_directors_shard_clear)
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        return (shardcfg_clear(ctx, vshard->shardd));
}

VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_INT replicas)
{
        return (shardcfg_reconfigure(ctx, vshard->shardd, replicas));
}
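
/*
 * by=HASH can only reuse the stored request digest in backend context
 * (ctx->bo); on the client side the digest may not have been computed yet
 * when this runs, so HASH falls back to hashing the URL, as by=URL does.
 */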
static inline uint32_t
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
{
        struct http *http;
        VCL_ENUM by = default_by(p->by);

        if (by == VENUM(KEY) || by == VENUM(BLOB))
                return (p->key);
        if (by == VENUM(HASH) && ctx->bo != NULL) {
                CHECK_OBJ(ctx->bo, BUSYOBJ_MAGIC);
                return (vbe32dec(ctx->bo->digest));
        }
        if (by == VENUM(HASH) || by == VENUM(URL)) {
                if (ctx->http_req) {
                        AN(http = ctx->http_req);
                } else {
                        AN(ctx->http_bereq);
                        AN(http = ctx->http_bereq);
                }
                return (VRT_HashStrands32(TOSTRAND(http->hd[HTTP_HDR_URL].b)));
        }
        WRONG("by enum");
}

/*
 * merge parameters to resolve all undef values
 * key is to be calculated after merging
 */
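/*
 * The recursion below terminates because every defaults chain ends at
 * shard_param_default, whose mask covers all of arg_mask_param_.
 */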
static void
shard_param_merge(struct vmod_directors_shard_param *to,
    const struct vmod_directors_shard_param *from)
{
        CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((to->mask & ~arg_mask_param_) == 0);

        if (to->mask == arg_mask_param_)
                return;

        CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert((from->mask & ~arg_mask_param_) == 0);

        if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
                to->by = from->by;
                if (from->by == VENUM(KEY) || from->by == VENUM(BLOB))
                        to->key = from->key;
        }

#define mrg(to, from, field) do {                               \
                if (((to)->mask & arg_ ## field) == 0 &&        \
                    ((from)->mask & arg_ ## field) != 0)        \
                        (to)->field = (from)->field;            \
        } while(0)

        mrg(to, from, healthy);
        mrg(to, from, rampup);
        mrg(to, from, alt);
        mrg(to, from, warmup);
#undef mrg

        to->mask |= from->mask;

        if (to->mask == arg_mask_param_)
                return;

        AN(from->defaults);
        shard_param_merge(to, from->defaults);
}
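
/*
 * The blob is read as a big-endian number, right-aligned into the 4-byte
 * key: a two-byte blob {0xaa, 0xbb} yields key 0x0000aabb, and only the
 * first four bytes of longer blobs are used.
 */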
static uint32_t
shard_blob_key(VCL_BLOB key_blob)
{
        uint8_t k[4] = { 0 };
        const uint8_t *b;
        size_t i, ki;

        CHECK_OBJ_NOTNULL(key_blob, VRT_BLOB_MAGIC);
        AN(key_blob->blob);
        assert(key_blob->len > 0);

        if (key_blob->len >= 4)
                ki = 0;
        else
                ki = 4 - key_blob->len;

        b = key_blob->blob;
        for (i = 0; ki < 4; i++, ki++)
                k[ki] = b[i];
        assert(i <= key_blob->len);

        return (vbe32dec(k));
}

/*
 * convert vmod interface valid_* to our bitmask
 */

#define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)

static uint32_t
shard_backendarg_mask_(const struct VARGS(shard_backend) * const a)
{
        return (tobit(a, by) |
            tobit(a, key) |
            tobit(a, key_blob) |
            tobit(a, alt) |
            tobit(a, warmup) |
            tobit(a, rampup) |
            tobit(a, healthy) |
            tobit(a, param) |
            tobit(a, resolve));
}

static uint32_t
shard_param_set_mask(const struct VARGS(shard_param_set) * const a)
{
        return (tobit(a, by) |
            tobit(a, key) |
            tobit(a, key_blob) |
            tobit(a, alt) |
            tobit(a, warmup) |
            tobit(a, rampup) |
            tobit(a, healthy));
}
#undef tobit

/*
 * check arguments and return them in a struct param
 */
static struct vmod_directors_shard_param *
shard_param_args(VRT_CTX,
    struct vmod_directors_shard_param *p, const char *func,
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
{

        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p->vcl_name);

        assert((args & ~arg_mask_set_) == 0);

        if (!(args & arg_by))
                by_s = NULL;
        by_s = default_by(by_s);

        /* by_s / key_int / key_blob */
        if (by_s == VENUM(KEY)) {
                if ((args & arg_key) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                if (key_int < 0 || key_int > UINT32_MAX) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid key argument %jd with by=%s",
                            func, (intmax_t)key_int, by_s);
                        return (NULL);
                }
                assert(key_int >= 0);
                assert(key_int <= UINT32_MAX);
                p->key = (uint32_t)key_int;
        } else if (by_s == VENUM(BLOB)) {
                if ((args & arg_key_blob) == 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s missing key_blob argument with by=%s",
                            func, by_s);
                        return (NULL);
                }
                CHECK_OBJ_ORNULL(key_blob, VRT_BLOB_MAGIC);
                if (key_blob == NULL || key_blob->len == 0 ||
                    key_blob->blob == NULL) {
                        shard_err(ctx->vsl, p->vcl_name,
                            "%s by=BLOB but no or empty key_blob - using key 0",
                            func);
                        p->key = 0;
                } else
                        p->key = shard_blob_key(key_blob);
        } else if (by_s == VENUM(HASH) || by_s == VENUM(URL)) {
                if (args & (arg_key|arg_key_blob)) {
                        shard_fail(ctx, p->vcl_name,
                            "%s key and key_blob arguments are "
                            "invalid with by=%s", func, by_s);
                        return (NULL);
                }
        } else {
                WRONG("by enum");
        }
        p->by = by_s;

        if (args & arg_alt) {
                if (alt < 0) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid alt argument %jd",
                            func, (intmax_t)alt);
                        return (NULL);
                }
                p->alt = alt;
        }

        if (args & arg_warmup) {
                if ((warmup < 0 && warmup != -1) || warmup > 1) {
                        shard_fail(ctx, p->vcl_name,
                            "%s invalid warmup argument %f",
                            func, warmup);
                        return (NULL);
                }
                p->warmup = warmup;
        }

        if (args & arg_rampup)
                p->rampup = !!rampup;

        if (args & arg_healthy)
                p->healthy = healthy_s;

        p->mask = args & arg_mask_param_;
        return (p);
}
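
/*
 * resolve=NOW picks a backend right away from the merged parameters;
 * resolve=LAZY returns the director itself and parks the parameters in a
 * task priv keyed on the director, to be consumed by vmod_shard_resolve().
 */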
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_backend) *a)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk;
        struct vmod_directors_shard_param *pp = NULL;
        const struct vmod_directors_shard_param *ppt;
        VCL_ENUM resolve;
        uint32_t args = shard_backendarg_mask_(a);

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
        shardd = vshard->shardd;
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        assert((args & ~arg_mask_) == 0);

        if (args & arg_resolve)
                resolve = a->resolve;
        else if (ctx->method & VCL_MET_TASK_H)
                resolve = VENUM(LAZY);
        else
                resolve = VENUM(NOW);

        if (resolve == VENUM(LAZY)) {
                if ((args & ~arg_resolve) == 0) {
                        AN(vshard->dir);
                        return (vshard->dir);
                }

                if ((ctx->method & SHARD_VCL_TASK_BEREQ) == 0) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=LAZY) with other "
                            "parameters can only be used in backend/pipe "
                            "context");
                        return (NULL);
                }

                pp = shard_param_task_l(ctx, shardd, shardd->name,
                    shardd->param);
                if (pp == NULL)
                        return (NULL);
        } else if (resolve == VENUM(NOW)) {
                if (ctx->method & VCL_MET_TASK_H) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(resolve=NOW) cannot be "
                            "used in vcl_init{}/vcl_fini{}");
                        return (NULL);
                }
                ppt = shard_param_task_r(ctx, shardd, shardd->name,
                    shardd->param);
                AN(ppt);
                pp = shard_param_stack(&pstk, ppt, shardd->name);
        } else {
                WRONG("resolve enum");
        }

        AN(pp);

        if (args & arg_param) {
                ppt = shard_param_blob(a->param);
                if (ppt == NULL) {
                        shard_fail(ctx, shardd->name, "%s",
                            ".backend(key_blob) param invalid");
                        return (NULL);
                }
                pp->defaults = ppt;
        }

        pp = shard_param_args(ctx, pp, "shard.backend()",
            args & arg_mask_set_,
            a->by, a->key, a->key_blob, a->alt, a->warmup,
            a->rampup, a->healthy);
        if (pp == NULL)
                return (NULL);

        if (resolve == VENUM(LAZY))
                return (vshard->dir);

        assert(resolve == VENUM(NOW));
        shard_param_merge(pp, pp->defaults);
        return (sharddir_pick_be(ctx, shardd, shard_get_key(ctx, pp),
            pp->alt, pp->warmup, pp->rampup, pp->healthy));
}

static VCL_BOOL v_matchproto_(vdi_healthy_f)
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
{
        struct sharddir *shardd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
        return (sharddir_any_healthy(ctx, shardd, changed));
}

static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
{
        struct sharddir *shardd;
        struct vmod_directors_shard_param pstk[1];
        const struct vmod_directors_shard_param *pp;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        pp = vmod_shard_param_read(ctx, shardd, shardd->name,
            shardd->param, pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);

        return (sharddir_pick_be(ctx, shardd,
            shard_get_key(ctx, pp), pp->alt, pp->warmup,
            pp->rampup, pp->healthy));
}
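
/*
 * Director listing as used by the CLI (backend.list): a summary by
 * default, one line per backend with pflag set, and JSON variants of
 * both with jflag set.
 */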
static void v_matchproto_(vdi_list_f)
vmod_shard_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag, int jflag)
{
        struct sharddir *shardd;
        struct shard_backend *sbe;
        VCL_TIME c, changed = 0;
        VCL_DURATION rampup_d, d;
        VCL_BACKEND be;
        VCL_BOOL h;
        unsigned i, nh = 0;
        double rampup_p;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

        if (pflag) {
                if (jflag) {
                        VSB_cat(vsb, "{\n");
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"warmup\": %f,\n", shardd->warmup);
                        VSB_printf(vsb, "\"rampup_duration\": %f,\n",
                            shardd->rampup_duration);
                        VSB_cat(vsb, "\"backends\": {\n");
                        VSB_indent(vsb, 2);
                } else {
                        VSB_cat(vsb, "\n\n\tBackend\tIdent\tHealth\t"
                            "Rampup  Remaining\n");
                }
        }

        sharddir_rdlock(shardd);
        for (i = 0; i < shardd->n_backend; i++) {
                sbe = &shardd->backend[i];
                AN(sbe);
                be = sbe->backend;
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

                c = 0;
                h = VRT_Healthy(ctx, be, &c);
                if (h)
                        nh++;
                if (c > changed)
                        changed = c;
                if ((pflag) == 0)
                        continue;

                d = ctx->now - c;
                rampup_d = shardcfg_get_rampup(shardd, i);
                if (!h) {
                        rampup_p = 0.0;
                        rampup_d = 0.0;
                } else if (d < rampup_d) {
                        rampup_p = d / rampup_d;
                        rampup_d -= d;
                } else {
                        rampup_p = 1.0;
                        rampup_d = 0.0;
                }

                if (jflag) {
                        if (i)
                                VSB_cat(vsb, ",\n");
                        VSB_printf(vsb, "\"%s\": {\n",
                            be->vcl_name);
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"ident\": \"%s\",\n",
                            sbe->ident ? sbe->ident : be->vcl_name);
                        VSB_printf(vsb, "\"health\": \"%s\",\n",
                            h ? "healthy" : "sick");
                        VSB_printf(vsb, "\"rampup\": %f,\n", rampup_p);
                        VSB_printf(vsb, "\"rampup_remaining\": %.3f\n",
                            rampup_d);
                        VSB_indent(vsb, -2);
                        VSB_cat(vsb, "}");
                } else {
                        VSB_printf(vsb, "\t%s\t%s\t%s\t%6.2f%% %8.3fs\n",
                            be->vcl_name,
                            sbe->ident ? sbe->ident : be->vcl_name,
                            h ? "healthy" : "sick",
                            rampup_p * 100, rampup_d);
                }
        }
        sharddir_unlock(shardd);

        if (jflag && (pflag)) {
                VSB_cat(vsb, "\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "}\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "},\n");
        }

        if (pflag)
                return;

        if (jflag)
                VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, i,
                    nh ? "healthy" : "sick");
        else
                VSB_printf(vsb, "%u/%u\t%s", nh, i, nh ? "healthy" : "sick");
}

VCL_VOID v_matchproto_(td_directors_shard_debug)
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_INT i)
{
        CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

        (void)ctx;
        sharddir_debug(vshard->shardd, i & UINT32_MAX);
}

/* =============================================================
 * shard_param
 */

VCL_VOID v_matchproto_(td_directors_shard_param__init)
vmod_shard_param__init(VRT_CTX,
    struct vmod_directors_shard_param **pp, const char *vcl_name)
{
        struct vmod_directors_shard_param *p;

        (void) ctx;
        AN(pp);
        AZ(*pp);
        ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        AN(p);
        p->vcl_name = vcl_name;
        p->scope = SCOPE_VCL;
        p->defaults = &shard_param_default;

        *pp = p;
}

VCL_VOID v_matchproto_(td_directors_shard_param__fini)
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
{
        struct vmod_directors_shard_param *p;

        TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        FREE_OBJ(p);
}

/*
 * init a stack param struct defaulting to pa with the given name
 */
static struct vmod_directors_shard_param *
shard_param_stack(struct vmod_directors_shard_param *p,
    const struct vmod_directors_shard_param *pa, const char *who)
{
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        AN(p);
        INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        p->vcl_name = who;
        p->scope = SCOPE_STACK;
        p->defaults = pa;

        return (p);
}
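
/*
 * Task privs are looked up by the address id + task_off_param rather than
 * id itself, keeping these param privs distinct from any other task priv
 * keyed on the same object.
 */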
static const struct vmod_directors_shard_param *
shard_param_task_r(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa)
{
        const struct vmod_directors_shard_param *p;
        const struct vmod_priv *task;
        const void *task_id;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        task_id = (const char *)id + task_off_param;
        task = VRT_priv_task_get(ctx, task_id);

        if (task) {
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
                assert(p->scope == SCOPE_TASK);
                assert(who == p->vcl_name);
                return (p);
        }

        if (id == pa || pa->scope != SCOPE_VCL)
                return (pa);

        return (shard_param_task_r(ctx, pa, pa->vcl_name, pa));
}

/*
 * get a task scoped param struct for id defaulting to pa
 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
 */
static struct vmod_directors_shard_param *
shard_param_task_l(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa)
{
        struct vmod_directors_shard_param *p;
        struct vmod_priv *task;
        const void *task_id;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
        assert(pa->scope > _SCOPE_INVALID);

        task_id = (const char *)id + task_off_param;
        task = VRT_priv_task(ctx, task_id);

        if (task == NULL) {
                shard_fail(ctx, who, "%s", "no priv_task");
                return (NULL);
        }

        if (task->priv) {
                CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
                assert(p->scope == SCOPE_TASK);
                assert(who == p->vcl_name);
                return (p);
        }

        WS_TASK_ALLOC_OBJ(ctx, p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        if (p == NULL)
                return (NULL);
        task->priv = p;
        p->vcl_name = who;
        p->scope = SCOPE_TASK;

        if (id == pa || pa->scope != SCOPE_VCL)
                p->defaults = pa;
        else
                p->defaults = shard_param_task_l(ctx, pa, pa->vcl_name, pa);

        if (p->defaults == NULL)
                return (NULL);

        return (p);
}

static struct vmod_directors_shard_param *
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
    const char *who)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

        if (ctx->method & SHARD_VCL_TASK_REQ) {
                shard_fail(ctx, p->vcl_name, "%s may only be used "
                    "in vcl_init and in backend/pipe context", who);
                return (NULL);
        } else if (ctx->method & SHARD_VCL_TASK_BEREQ)
                p = shard_param_task_l(ctx, p, p->vcl_name, p);
        else
                assert(ctx->method & VCL_MET_TASK_H);

        return (p);
}

VCL_VOID v_matchproto_(td_directors_shard_param_set)
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
    struct VARGS(shard_param_set) *a)
{
        uint32_t args = shard_param_set_mask(a);

        assert((args & ~arg_mask_set_) == 0);

        p = shard_param_prep(ctx, p, "shard_param.set()");
        if (p == NULL)
                return;
        (void) shard_param_args(ctx, p, "shard_param.set()", args,
            a->by, a->key, a->key_blob, a->alt, a->warmup,
            a->rampup, a->healthy);
}

VCL_VOID v_matchproto_(td_directors_shard_param_clear)
vmod_shard_param_clear(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        p = shard_param_prep(ctx, p, "shard_param.clear()");
        if (p == NULL)
                return;
        p->mask = 0;
}

static const struct vmod_directors_shard_param *
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *p,
    struct vmod_directors_shard_param *pstk)
{
        struct vmod_directors_shard_param *pp;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

        if (ctx->method == 0 || (ctx->method & SHARD_VCL_TASK_BEREQ))
                p = shard_param_task_r(ctx, id, who, p);

        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
        pp = shard_param_stack(pstk, p, p->vcl_name);
        shard_param_merge(pp, p);
        return (pp);
}
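
/*
 * The getters below resolve against a throwaway stack copy, so reading a
 * parameter never modifies the object or its task priv.
 */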
VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
vmod_shard_param_get_by(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return (default_by(pp->by));
}

VCL_INT v_matchproto_(td_directors_shard_param_get_key)
vmod_shard_param_get_key(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return ((VCL_INT)shard_get_key(ctx, pp));
}

VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
vmod_shard_param_get_alt(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return (pp->alt);
}

VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
vmod_shard_param_get_warmup(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return (pp->warmup);
}

VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
vmod_shard_param_get_rampup(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return (pp->rampup);
}

VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
vmod_shard_param_get_healthy(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        struct vmod_directors_shard_param pstk;
        const struct vmod_directors_shard_param *pp;

        pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
        CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
        return (default_healthy(pp->healthy));
}

static const struct vmod_directors_shard_param *
shard_param_blob(VCL_BLOB blob)
{
        const struct vmod_directors_shard_param *p;

        CHECK_OBJ_ORNULL(blob, VRT_BLOB_MAGIC);
        if (blob && blob->type == VMOD_SHARD_SHARD_PARAM_BLOB &&
            blob->blob != NULL &&
            blob->len == sizeof(struct vmod_directors_shard_param)) {
                CAST_OBJ_NOTNULL(p, blob->blob, VMOD_SHARD_SHARD_PARAM_MAGIC);
                return (p);
        }

        return (NULL);
}

VCL_BLOB v_matchproto_(td_directors_shard_param_use)
vmod_shard_param_use(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

        return (VRT_blob(ctx, "xshard_param.use()", p, sizeof *p,
            VMOD_SHARD_SHARD_PARAM_BLOB));
}