| | varnish-cache/vmod/vmod_directors.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2013-2015 Varnish Software AS |
2 |
|
* Copyright 2019 UPLEX - Nils Goroll Systemoptimierung |
3 |
|
* All rights reserved. |
4 |
|
* |
5 |
|
* Author: Poul-Henning Kamp <phk@FreeBSD.org> |
6 |
|
* Author: Nils Goroll <nils.goroll@uplex.de> |
7 |
|
* |
8 |
|
* SPDX-License-Identifier: BSD-2-Clause |
9 |
|
* |
10 |
|
* Redistribution and use in source and binary forms, with or without |
11 |
|
* modification, are permitted provided that the following conditions |
12 |
|
* are met: |
13 |
|
* 1. Redistributions of source code must retain the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer. |
15 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
16 |
|
* notice, this list of conditions and the following disclaimer in the |
17 |
|
* documentation and/or other materials provided with the distribution. |
18 |
|
* |
19 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
20 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
21 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
22 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
23 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
24 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
25 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
26 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
27 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
28 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
29 |
|
* SUCH DAMAGE. |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include <stdlib.h> |
35 |
|
#include <stdio.h> |
36 |
|
|
37 |
|
#include "cache/cache.h" |
38 |
|
|
39 |
|
#include "vbm.h" |
40 |
|
#include "vcl.h" |
41 |
|
#include "vsb.h" |
42 |
|
|
43 |
|
#include "vcc_directors_if.h" |
44 |
|
|
45 |
|
#include "vmod_directors.h" |
46 |
|
|
47 |
|
VCL_BACKEND |
48 |
80 |
VPFX(lookup)(VRT_CTX, VCL_STRING name) |
49 |
|
{ |
50 |
80 |
if ((ctx->method & VCL_MET_TASK_H) == 0) { |
51 |
40 |
VRT_fail(ctx, |
52 |
|
"lookup() may only be called from vcl_init / vcl_fini"); |
53 |
40 |
return (NULL); |
54 |
|
} |
55 |
|
|
56 |
40 |
return (VRT_LookupDirector(ctx, name)); |
57 |
80 |
} |
58 |
|
|
59 |
|
static void |
60 |
1200 |
vdir_expand(struct vdir *vd, unsigned n) |
61 |
|
{ |
62 |
1200 |
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC); |
63 |
|
|
64 |
1200 |
vd->backend = realloc(vd->backend, n * sizeof *vd->backend); |
65 |
1200 |
AN(vd->backend); |
66 |
1200 |
vd->weight = realloc(vd->weight, n * sizeof *vd->weight); |
67 |
1200 |
AN(vd->weight); |
68 |
1200 |
if (n > vd->healthy->nbits) |
69 |
0 |
vbit_expand(vd->healthy, n); |
70 |
1200 |
AN(vd->healthy); |
71 |
1200 |
vd->l_backend = n; |
72 |
1200 |
} |
73 |
|
|
74 |
|
/*
 * Allocate and initialize a vdir, register it as a director with the
 * VCL under vcl_name, and store it in *vdp.
 *
 * *vdp must be NULL on entry; ownership of the new object passes to
 * the caller, who releases it with vdir_delete().
 */
void
vdir_new(VRT_CTX, struct vdir **vdp, const char *vcl_name,
    const struct vdi_methods *m, void *priv)
{
	struct vdir *vd;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
	AN(vcl_name);
	AN(vdp);
	AZ(*vdp);	/* caller must hand us an empty slot */
	ALLOC_OBJ(vd, VDIR_MAGIC);
	AN(vd);
	*vdp = vd;
	AZ(pthread_rwlock_init(&vd->mtx, NULL));
	vd->dir = VRT_AddDirector(ctx, m, priv, "%s", vcl_name);
	/* small initial health bitmap; vdir_expand() grows it on demand */
	vd->healthy = vbit_new(8);
	AN(vd->healthy);
}
93 |
|
|
94 |
|
/*
 * Tear down a vdir created by vdir_new() and NULL the caller's pointer.
 *
 * The director itself must already have been retired (vd->dir == NULL,
 * asserted below) before the backend references are dropped and the
 * arrays, lock, bitmap and object are released.
 */
void
vdir_delete(struct vdir **vdp)
{
	struct vdir *vd;
	unsigned u;

	TAKE_OBJ_NOTNULL(vd, vdp, VDIR_MAGIC);

	AZ(vd->dir);	/* director must be deleted by the caller first */
	/* release our reference on every remaining backend */
	for (u = 0; u < vd->n_backend; u++)
		VRT_Assign_Backend(&vd->backend[u], NULL);
	free(vd->backend);
	free(vd->weight);
	AZ(pthread_rwlock_destroy(&vd->mtx));
	vbit_destroy(vd->healthy);
	FREE_OBJ(vd);
}
111 |
|
|
112 |
|
/* Take the vdir lock shared (read); pairs with vdir_unlock(). */
void
vdir_rdlock(struct vdir *vd)
{
	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
	AZ(pthread_rwlock_rdlock(&vd->mtx));
}
118 |
|
|
119 |
|
/* Take the vdir lock exclusive (write); pairs with vdir_unlock(). */
void
vdir_wrlock(struct vdir *vd)
{
	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
	AZ(pthread_rwlock_wrlock(&vd->mtx));
}
125 |
|
|
126 |
|
/* Release the vdir lock taken by vdir_rdlock() or vdir_wrlock(). */
void
vdir_unlock(struct vdir *vd)
{
	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
	AZ(pthread_rwlock_unlock(&vd->mtx));
}
132 |
|
|
133 |
|
|
134 |
|
void |
135 |
3200 |
vdir_add_backend(VRT_CTX, struct vdir *vd, VCL_BACKEND be, double weight) |
136 |
|
{ |
137 |
|
unsigned u; |
138 |
|
|
139 |
3200 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
140 |
3200 |
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC); |
141 |
3200 |
if (be == NULL) { |
142 |
240 |
VRT_fail(ctx, "%s: None backend cannot be added", |
143 |
120 |
VRT_BACKEND_string(vd->dir)); |
144 |
120 |
return; |
145 |
|
} |
146 |
3080 |
AN(be); |
147 |
3080 |
vdir_wrlock(vd); |
148 |
3080 |
if (vd->n_backend >= vd->l_backend) |
149 |
1200 |
vdir_expand(vd, vd->l_backend + 16); |
150 |
3080 |
assert(vd->n_backend < vd->l_backend); |
151 |
3080 |
u = vd->n_backend++; |
152 |
3080 |
vd->backend[u] = NULL; |
153 |
3080 |
VRT_Assign_Backend(&vd->backend[u], be); |
154 |
3080 |
vd->weight[u] = weight; |
155 |
3080 |
vdir_unlock(vd); |
156 |
3200 |
} |
157 |
|
|
158 |
|
void |
159 |
480 |
vdir_remove_backend(VRT_CTX, struct vdir *vd, VCL_BACKEND be, unsigned *cur) |
160 |
|
{ |
161 |
|
unsigned u, n; |
162 |
|
|
163 |
480 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
164 |
480 |
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC); |
165 |
480 |
if (be == NULL) { |
166 |
0 |
VRT_fail(ctx, "%s: None backend cannot be removed", |
167 |
0 |
VRT_BACKEND_string(vd->dir)); |
168 |
0 |
return; |
169 |
|
} |
170 |
480 |
CHECK_OBJ(be, DIRECTOR_MAGIC); |
171 |
480 |
vdir_wrlock(vd); |
172 |
680 |
for (u = 0; u < vd->n_backend; u++) |
173 |
680 |
if (vd->backend[u] == be) |
174 |
480 |
break; |
175 |
480 |
if (u == vd->n_backend) { |
176 |
0 |
vdir_unlock(vd); |
177 |
0 |
return; |
178 |
|
} |
179 |
480 |
VRT_Assign_Backend(&vd->backend[u], NULL); |
180 |
480 |
n = (vd->n_backend - u) - 1; |
181 |
480 |
memmove(&vd->backend[u], &vd->backend[u+1], n * sizeof(vd->backend[0])); |
182 |
480 |
memmove(&vd->weight[u], &vd->weight[u+1], n * sizeof(vd->weight[0])); |
183 |
480 |
vd->n_backend--; |
184 |
|
|
185 |
480 |
if (cur) { |
186 |
160 |
assert(*cur <= vd->n_backend); |
187 |
160 |
if (u < *cur) |
188 |
40 |
(*cur)--; |
189 |
120 |
else if (*cur == vd->n_backend) |
190 |
40 |
*cur = 0; |
191 |
160 |
} |
192 |
480 |
vdir_unlock(vd); |
193 |
480 |
} |
194 |
|
|
195 |
|
VCL_BOOL |
196 |
1960 |
vdir_any_healthy(VRT_CTX, struct vdir *vd, VCL_TIME *changed) |
197 |
|
{ |
198 |
1960 |
unsigned retval = 0; |
199 |
|
VCL_BACKEND be; |
200 |
|
unsigned u; |
201 |
|
vtim_real c; |
202 |
|
|
203 |
1960 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
204 |
1960 |
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC); |
205 |
1960 |
vdir_rdlock(vd); |
206 |
1960 |
if (changed != NULL) |
207 |
400 |
*changed = 0; |
208 |
2800 |
for (u = 0; u < vd->n_backend; u++) { |
209 |
2520 |
be = vd->backend[u]; |
210 |
2520 |
CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC); |
211 |
2520 |
retval = VRT_Healthy(ctx, be, &c); |
212 |
2520 |
if (changed != NULL && c > *changed) |
213 |
520 |
*changed = c; |
214 |
2520 |
if (retval) |
215 |
1680 |
break; |
216 |
840 |
} |
217 |
1960 |
vdir_unlock(vd); |
218 |
1960 |
return (retval); |
219 |
|
} |
220 |
|
|
221 |
|
/*
 * Render the director's backend list for the CLI (backend.list).
 *
 * pflag: verbose listing of the individual backends; otherwise only a
 *        healthy/total summary line is emitted.
 * jflag: JSON output instead of the tabular text format.
 * weight: include the (healthy) weights / total_weight in the output.
 *
 * Health state and total_weight are refreshed under the read lock; the
 * healthy/backend counts are snapshotted before unlocking so the
 * summary is consistent with what was listed.
 */
void
vdir_list(VRT_CTX, struct vdir *vd, struct vsb *vsb, int pflag, int jflag,
    int weight)
{
	VCL_BACKEND be;
	VCL_BOOL h;
	unsigned u, nh;
	double w;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);

	/* header: open the JSON objects, or print the text column heads */
	if (pflag) {
		if (jflag) {
			VSB_cat(vsb, "{\n");
			VSB_indent(vsb, 2);
			if (weight)
				VSB_printf(vsb, "\"total_weight\": %f,\n",
				    vd->total_weight);
			VSB_cat(vsb, "\"backends\": {\n");
			VSB_indent(vsb, 2);
		} else {
			VSB_cat(vsb, "\n\n\tBackend\tWeight\tHealth\n");
		}
	}

	vdir_rdlock(vd);
	vdir_update_health(ctx, vd);
	/* per-backend rows only in verbose (pflag) mode */
	for (u = 0; pflag && u < vd->n_backend; u++) {
		be = vd->backend[u];
		CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

		h = vbit_test(vd->healthy, u);

		/* sick backends count as weight 0 in the listing */
		w = h ? vd->weight[u] : 0.0;

		if (jflag) {
			if (u)
				VSB_cat(vsb, ",\n");
			VSB_printf(vsb, "\"%s\": {\n", be->vcl_name);
			VSB_indent(vsb, 2);

			if (weight)
				VSB_printf(vsb, "\"weight\": %f,\n", w);

			if (h)
				VSB_cat(vsb, "\"health\": \"healthy\"\n");
			else
				VSB_cat(vsb, "\"health\": \"sick\"\n");

			VSB_indent(vsb, -2);
			VSB_cat(vsb, "}");
		} else {
			VSB_cat(vsb, "\t");
			VSB_cat(vsb, be->vcl_name);
			if (weight)
				VSB_printf(vsb, "\t%6.2f%%\t",
				    100 * w / vd->total_weight);
			else
				VSB_cat(vsb, "\t-\t");
			VSB_cat(vsb, h ? "healthy" : "sick");
			VSB_cat(vsb, "\n");
		}
	}
	/* snapshot counts under the lock for a consistent summary */
	nh = vd->n_healthy;
	u = vd->n_backend;
	vdir_unlock(vd);

	/* footer: close the JSON objects opened above */
	if (jflag && (pflag)) {
		VSB_cat(vsb, "\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "}\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "},\n");
	}

	if (pflag)
		return;

	/* non-verbose summary: healthy/total and overall state */
	if (jflag)
		VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, u,
		    nh ? "healthy" : "sick");
	else
		VSB_printf(vsb, "%u/%u\t%s", nh, u, nh ? "healthy" : "sick");
}
306 |
|
|
307 |
|
/* |
308 |
|
* iterate backends and update |
309 |
|
* - healthy bitmap |
310 |
|
* - number of healthy backends |
311 |
|
* - total_weight |
312 |
|
* - last change time of the VCL_BACKEND |
313 |
|
* |
314 |
|
* must be called under the vdir lock (read or write). |
315 |
|
* |
316 |
|
* A write lock is required if consistency between the individual attributes is |
317 |
|
* a must, e.g. when total_weight is required to be the exact sum of the weights |
318 |
|
* |
319 |
|
* The read lock is safe because add_backend expands the healthy bitmap and all |
320 |
|
* other members are atomic and may be used if consistency is not required. |
321 |
|
*/ |
322 |
|
void
vdir_update_health(VRT_CTX, struct vdir *vd)
{
	VCL_TIME c, changed = 0;
	VCL_BOOL h;
	VCL_BACKEND be;
	unsigned u, nh = 0;
	double tw = 0.0;
	struct vbitmap *healthy;

	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
	healthy = vd->healthy;
	for (u = 0; u < vd->n_backend; u++) {
		be = vd->backend[u];
		CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
		c = 0;
		h = VRT_Healthy(ctx, vd->backend[u], &c);
		/* only healthy backends contribute to the totals */
		if (h) {
			nh++;
			tw += vd->weight[u];
		}
		/* track the most recent health-change time of any backend */
		if (c > changed)
			changed = c;
		/* flip the bitmap only where the state actually changed */
		if (h != vbit_test(healthy, u)) {
			if (h)
				vbit_set(healthy, u);
			else
				vbit_clr(healthy, u);
		}
	}
	VRT_SetChanged(vd->dir, changed);
	/*
	 * Published last, as single stores (see lock comment above):
	 * readers holding only the read lock may observe either the old
	 * or the new value, but never a torn/intermediate one.
	 */
	vd->total_weight = tw;
	vd->n_healthy = nh;
}
356 |
|
|
357 |
|
static unsigned |
358 |
1840 |
vdir_pick_by_weight(const struct vdir *vd, double w) |
359 |
|
{ |
360 |
1840 |
const struct vbitmap *healthy = vd->healthy; |
361 |
1840 |
double a = 0.0; |
362 |
|
unsigned u; |
363 |
|
|
364 |
1840 |
AN(healthy); |
365 |
2799 |
for (u = 0; u < vd->n_backend; u++) { |
366 |
2799 |
if (! vbit_test(healthy, u)) |
367 |
160 |
continue; |
368 |
2639 |
a += vd->weight[u]; |
369 |
2639 |
if (w < a) |
370 |
1840 |
return (u); |
371 |
799 |
} |
372 |
0 |
WRONG(""); |
373 |
|
} |
374 |
|
|
375 |
|
VCL_BACKEND |
376 |
1880 |
vdir_pick_be(VRT_CTX, struct vdir *vd, double w) |
377 |
|
{ |
378 |
|
unsigned u; |
379 |
1880 |
VCL_BACKEND be = NULL; |
380 |
|
|
381 |
1880 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
382 |
1880 |
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC); |
383 |
1880 |
vdir_wrlock(vd); |
384 |
1880 |
vdir_update_health(ctx, vd); |
385 |
1880 |
if (vd->total_weight > 0.0) { |
386 |
1840 |
u = vdir_pick_by_weight(vd, w * vd->total_weight); |
387 |
1840 |
assert(u < vd->n_backend); |
388 |
1840 |
be = vd->backend[u]; |
389 |
1840 |
CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC); |
390 |
1840 |
} |
391 |
1880 |
vdir_unlock(vd); |
392 |
1880 |
return (be); |
393 |
|
} |