| | varnish-cache/vmod/vmod_directors_fall_back.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2013-2015 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@FreeBSD.org> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
*/ |
29 |
|
|
30 |
|
#include "config.h" |
31 |
|
|
32 |
|
#include <stdlib.h> |
33 |
|
#include <string.h> |
34 |
|
|
35 |
|
#include "cache/cache.h" |
36 |
|
|
37 |
|
#include "vcc_directors_if.h" |
38 |
|
|
39 |
|
#include "vmod_directors.h" |
40 |
|
#include "vsb.h" |
41 |
|
#include "vbm.h" |
42 |
|
|
43 |
|
/*
 * Per-instance state for one directors.fallback() object.
 */
struct vmod_directors_fallback {
	unsigned				magic;	/* object cookie for CHECK_OBJ/CAST_OBJ */
#define VMOD_DIRECTORS_FALLBACK_MAGIC 0xad4e26ba
	struct vdir				*vd;	/* shared director base: backend list, health bitmap, lock */
	VCL_BOOL				st;	/* "sticky": keep the current backend while it stays healthy */
	unsigned				cur;	/* index of the currently selected backend in vd->backend[] */
};
50 |
|
|
51 |
|
static VCL_BOOL v_matchproto_(vdi_healthy) |
52 |
56 |
vmod_fallback_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed) |
53 |
|
{ |
54 |
|
struct vmod_directors_fallback *fb; |
55 |
|
|
56 |
56 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
57 |
56 |
CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC); |
58 |
56 |
CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC); |
59 |
56 |
return (vdir_any_healthy(ctx, fb->vd, changed)); |
60 |
|
} |
61 |
|
|
62 |
|
/*
 * vdi_list_f callback: render this director's backend list into vsb
 * for the CLI "backend.list" command.
 *
 * jflag selects JSON output, plain text otherwise; pflag selects the
 * verbose per-backend listing instead of the one-line summary.
 */
static void v_matchproto_(vdi_list_f)
vmod_fallback_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag,
    int jflag)
{
	struct vmod_directors_fallback *fb;
	struct vdir *vd;
	VCL_BACKEND be;
	VCL_BOOL h;
	unsigned u, nh;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);
	CAST_OBJ_NOTNULL(vd, fb->vd, VDIR_MAGIC);

	/* Verbose mode: emit the header before walking the backends. */
	if (pflag) {
		if (jflag) {
			VSB_cat(vsb, "{\n");
			VSB_indent(vsb, 2);
			VSB_printf(vsb, "\"sticky\": %s,\n",
			    fb->st ? "true" : "false");
			VSB_cat(vsb, "\"backends\": {\n");
			VSB_indent(vsb, 2);
		} else {
			VSB_cat(vsb, "\n\n\tBackend\tCurrent\tHealth\n");
		}
	}

	/* Hold the read lock while touching vd->backend/healthy/counts. */
	vdir_rdlock(vd);
	vdir_update_health(ctx, vd);
	/* The loop body only runs in verbose (pflag) mode. */
	for (u = 0; pflag && u < vd->n_backend; u++) {
		be = vd->backend[u];
		CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

		h = vbit_test(vd->healthy, u);

		if (jflag) {
			if (u)
				VSB_cat(vsb, ",\n");
			VSB_printf(vsb, "\"%s\": {\n", be->vcl_name);
			VSB_indent(vsb, 2);

			/* fb->cur marks the backend currently in use. */
			if (fb->cur == u)
				VSB_cat(vsb, "\"current\": true,\n");
			else
				VSB_cat(vsb, "\"current\": false,\n");

			if (h)
				VSB_cat(vsb, "\"health\": \"healthy\"\n");
			else
				VSB_cat(vsb, "\"health\": \"sick\"\n");

			VSB_indent(vsb, -2);
			VSB_cat(vsb, "}");
		} else {
			VSB_cat(vsb, "\t");
			VSB_cat(vsb, be->vcl_name);
			/* '*' column marks the current backend. */
			if (fb->cur == u)
				VSB_cat(vsb, "\t*\t");
			else
				VSB_cat(vsb, "\t\t");
			VSB_cat(vsb, h ? "healthy" : "sick");
			VSB_cat(vsb, "\n");
		}
	}
	/* Snapshot the counts while still under the lock. */
	nh = vd->n_healthy;
	u = vd->n_backend;
	vdir_unlock(vd);

	/* Close the JSON structures opened above. */
	if (jflag && (pflag)) {
		VSB_cat(vsb, "\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "}\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "},\n");
	}

	if (pflag)
		return;

	/* Summary line: healthy count, total count, overall state. */
	if (jflag)
		VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, u,
		    nh ? "healthy" : "sick");
	else
		VSB_printf(vsb, "%u/%u\t%s", nh, u, nh ? "healthy" : "sick");
}
148 |
|
|
149 |
|
static VCL_BACKEND v_matchproto_(vdi_resolve_f) |
150 |
378 |
vmod_fallback_resolve(VRT_CTX, VCL_BACKEND dir) |
151 |
|
{ |
152 |
|
struct vmod_directors_fallback *fb; |
153 |
|
unsigned u; |
154 |
378 |
VCL_BACKEND be = NULL; |
155 |
|
|
156 |
378 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
157 |
378 |
CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC); |
158 |
378 |
CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC); |
159 |
|
|
160 |
378 |
vdir_wrlock(fb->vd); |
161 |
378 |
if (!fb->st) |
162 |
252 |
fb->cur = 0; |
163 |
630 |
for (u = 0; u < fb->vd->n_backend; u++) { |
164 |
616 |
be = fb->vd->backend[fb->cur]; |
165 |
616 |
CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC); |
166 |
616 |
if (VRT_Healthy(ctx, be, NULL)) |
167 |
364 |
break; |
168 |
252 |
if (++fb->cur == fb->vd->n_backend) |
169 |
28 |
fb->cur = 0; |
170 |
252 |
} |
171 |
378 |
if (u == fb->vd->n_backend) |
172 |
14 |
be = NULL; |
173 |
378 |
vdir_unlock(fb->vd); |
174 |
378 |
return (be); |
175 |
|
} |
176 |
|
|
177 |
|
static void v_matchproto_(vdi_release_f) |
178 |
14 |
vmod_fallback_release(VCL_BACKEND dir) |
179 |
|
{ |
180 |
|
struct vmod_directors_fallback *fallback; |
181 |
|
|
182 |
14 |
CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC); |
183 |
14 |
CAST_OBJ_NOTNULL(fallback, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC); |
184 |
14 |
vdir_release(fallback->vd); |
185 |
14 |
} |
186 |
|
|
187 |
|
static void v_matchproto_(vdi_destroy_f) |
188 |
14 |
vmod_fallback_destroy(VCL_BACKEND dir) |
189 |
|
{ |
190 |
|
struct vmod_directors_fallback *fallback; |
191 |
|
|
192 |
14 |
CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC); |
193 |
14 |
CAST_OBJ_NOTNULL(fallback, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC); |
194 |
14 |
vdir_delete(&fallback->vd); |
195 |
14 |
FREE_OBJ(fallback); |
196 |
14 |
} |
197 |
|
|
198 |
|
/*
 * Director method table: wires the callbacks above into the
 * director framework under the type name "fallback".
 */
static const struct vdi_methods vmod_fallback_methods[1] = {{
	.magic = VDI_METHODS_MAGIC,
	.type = "fallback",
	.healthy = vmod_fallback_healthy,
	.resolve = vmod_fallback_resolve,
	.release = vmod_fallback_release,
	.destroy = vmod_fallback_destroy,
	.list = vmod_fallback_list
}};
207 |
|
|
208 |
|
|
209 |
|
VCL_VOID v_matchproto_() |
210 |
70 |
vmod_fallback__init(VRT_CTX, |
211 |
|
struct vmod_directors_fallback **fbp, const char *vcl_name, VCL_BOOL sticky) |
212 |
|
{ |
213 |
|
struct vmod_directors_fallback *fb; |
214 |
|
|
215 |
70 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
216 |
70 |
AN(fbp); |
217 |
70 |
AZ(*fbp); |
218 |
70 |
ALLOC_OBJ(fb, VMOD_DIRECTORS_FALLBACK_MAGIC); |
219 |
70 |
AN(fb); |
220 |
70 |
*fbp = fb; |
221 |
70 |
vdir_new(ctx, &fb->vd, vcl_name, vmod_fallback_methods, fb); |
222 |
70 |
fb->st = sticky; |
223 |
70 |
} |
224 |
|
|
225 |
|
VCL_VOID v_matchproto_() |
226 |
14 |
vmod_fallback__fini(struct vmod_directors_fallback **fbp) |
227 |
|
{ |
228 |
|
struct vmod_directors_fallback *fb; |
229 |
|
|
230 |
14 |
TAKE_OBJ_NOTNULL(fb, fbp, VMOD_DIRECTORS_FALLBACK_MAGIC); |
231 |
14 |
VRT_DelDirector(&fb->vd->dir); |
232 |
14 |
} |
233 |
|
|
234 |
|
VCL_VOID v_matchproto_() |
235 |
224 |
vmod_fallback_add_backend(VRT_CTX, |
236 |
|
struct vmod_directors_fallback *fb, VCL_BACKEND be) |
237 |
|
{ |
238 |
|
|
239 |
224 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
240 |
224 |
CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC); |
241 |
224 |
vdir_add_backend(ctx, fb->vd, be, 0.0); |
242 |
224 |
} |
243 |
|
|
244 |
|
VCL_VOID v_matchproto_() |
245 |
56 |
vmod_fallback_remove_backend(VRT_CTX, |
246 |
|
struct vmod_directors_fallback *fb, VCL_BACKEND be) |
247 |
|
{ |
248 |
56 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
249 |
56 |
CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC); |
250 |
56 |
vdir_remove_backend(ctx, fb->vd, be, &fb->cur); |
251 |
56 |
} |
252 |
|
|
253 |
|
VCL_BACKEND v_matchproto_() |
254 |
378 |
vmod_fallback_backend(VRT_CTX, |
255 |
|
struct vmod_directors_fallback *fb) |
256 |
|
{ |
257 |
378 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
258 |
378 |
CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC); |
259 |
378 |
return (fb->vd->dir); |
260 |
|
} |