| | varnish-cache/bin/varnishd/cache/cache_backend.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2006 Verdens Gang AS |
2 |
|
* Copyright (c) 2006-2015 Varnish Software AS |
3 |
|
* All rights reserved. |
4 |
|
* |
5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
6 |
|
* |
7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
8 |
|
* |
9 |
|
* Redistribution and use in source and binary forms, with or without |
10 |
|
* modification, are permitted provided that the following conditions |
11 |
|
* are met: |
12 |
|
* 1. Redistributions of source code must retain the above copyright |
13 |
|
* notice, this list of conditions and the following disclaimer. |
14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
15 |
|
* notice, this list of conditions and the following disclaimer in the |
16 |
|
* documentation and/or other materials provided with the distribution. |
17 |
|
* |
18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 |
|
* SUCH DAMAGE. |
29 |
|
* |
30 |
|
* The director implementation for VCL backends. |
31 |
|
* |
32 |
|
*/ |
33 |
|
|
34 |
|
#include "config.h" |
35 |
|
|
36 |
|
#include <stdlib.h> |
37 |
|
|
38 |
|
#include "cache_varnishd.h" |
39 |
|
#include "cache_director.h" |
40 |
|
|
41 |
|
#include "vtcp.h" |
42 |
|
#include "vtim.h" |
43 |
|
#include "vsa.h" |
44 |
|
|
45 |
|
#include "cache_backend.h" |
46 |
|
#include "cache_conn_pool.h" |
47 |
|
#include "cache_transport.h" |
48 |
|
#include "cache_vcl.h" |
49 |
|
#include "http1/cache_http1.h" |
50 |
|
#include "proxy/cache_proxy.h" |
51 |
|
|
52 |
|
#include "VSC_vbe.h" |
53 |
|
|
54 |
|
/*--------------------------------------------------------------------*/ |
55 |
|
|
56 |
|
/*
 * State of a thread waiting for a connection slot on a backend with
 * a max_connections limit (see vbe_dir_getfd()).
 */
enum connwait_e {
	CW_DO_CONNECT = 1,	/* free to attempt the connection */
	CW_QUEUED,		/* parked on the backend's cw_head queue */
	CW_DEQUEUED,		/* removed from the queue, outcome decided */
	CW_BE_BUSY,		/* giving up: backend is at its limit */
};

/*
 * Wait-queue entry; allocated on the waiting thread's stack in
 * vbe_dir_getfd() and linked onto backend->cw_head.
 */
struct connwait {
	unsigned		magic;
#define CONNWAIT_MAGIC		0x75c7a52b
	enum connwait_e		cw_state;
	VTAILQ_ENTRY(connwait)	cw_list;	/* on backend->cw_head */
	pthread_cond_t		cw_cond;	/* signaled when a slot may be free */
};

/* Identifies plain-HTTP backend connections in the connection pool. */
static const char * const vbe_proto_ident = "HTTP Backend";

/* Serializes updates of the global VSC_C_main->n_backend counter. */
static struct lock backends_mtx;
74 |
|
|
75 |
|
/*--------------------------------------------------------------------*/ |
76 |
|
|
77 |
|
void |
78 |
1729 |
VBE_Connect_Error(struct VSC_vbe *vsc, int err) |
79 |
|
{ |
80 |
|
|
81 |
1729 |
switch(err) { |
82 |
|
case 0: |
83 |
|
/* |
84 |
|
* This is kind of brittle, but zero is the only |
85 |
|
* value of errno we can trust to have no meaning. |
86 |
|
*/ |
87 |
749 |
vsc->helddown++; |
88 |
749 |
break; |
89 |
|
case EACCES: |
90 |
|
case EPERM: |
91 |
0 |
vsc->fail_eacces++; |
92 |
0 |
break; |
93 |
|
case EADDRNOTAVAIL: |
94 |
0 |
vsc->fail_eaddrnotavail++; |
95 |
0 |
break; |
96 |
|
case ECONNREFUSED: |
97 |
980 |
vsc->fail_econnrefused++; |
98 |
980 |
break; |
99 |
|
case ENETUNREACH: |
100 |
0 |
vsc->fail_enetunreach++; |
101 |
0 |
break; |
102 |
|
case ETIMEDOUT: |
103 |
0 |
vsc->fail_etimedout++; |
104 |
0 |
break; |
105 |
|
default: |
106 |
0 |
vsc->fail_other++; |
107 |
0 |
} |
108 |
1729 |
} |
109 |
|
|
110 |
|
/*--------------------------------------------------------------------*/ |
111 |
|
|
112 |
|
/*
 * Resolve the effective timeout for a fetch: the busyobj override
 * wins, then the backend's own setting (when >= 0), then the global
 * parameter of the same name.
 */
#define FIND_TMO(tmx, dst, bo, be)					\
	do {								\
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);			\
		dst = bo->tmx;						\
		if (isnan(dst) && be->tmx >= 0.0)			\
			dst = be->tmx;					\
		if (isnan(dst))						\
			dst = cache_param->tmx;				\
	} while (0)

/*
 * Pick a backend-specific setting, falling back to the global
 * parameter while the backend still holds the "unset" default.
 * Fixed to magic-check the macro's own 'be' argument instead of
 * silently relying on a variable named 'bp' at the expansion site.
 */
#define FIND_BE_SPEC(tmx, dst, be, def)					\
	do {								\
		CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);			\
		dst = be->tmx;						\
		if (dst == def)						\
			dst = cache_param->tmx;				\
	} while (0)

/* Unsigned per-backend parameters use 0 as the "unset" marker... */
#define FIND_BE_PARAM(tmx, dst, be)					\
	FIND_BE_SPEC(tmx, dst, be, 0)

/* ...while per-backend timeouts use -1.0. */
#define FIND_BE_TMO(tmx, dst, be)					\
	FIND_BE_SPEC(tmx, dst, be, -1.0)

/* A backend is busy when it has a connection limit and has reached it. */
#define BE_BUSY(be)							\
	(be->max_connections > 0 && be->n_conn >= be->max_connections)
138 |
|
|
139 |
|
/*--------------------------------------------------------------------*/ |
140 |
|
|
141 |
|
/*
 * Wake every thread queued for a connection slot on this backend,
 * e.g. because the backend was marked sick (see vbe_dir_event()) and
 * the waiters should re-evaluate instead of sleeping on.
 */
static void
vbe_connwait_broadcast(const struct backend *bp)
{
	struct connwait *cw;

	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

	Lck_Lock(bp->director->mtx);
	VTAILQ_FOREACH(cw, &bp->cw_head, cw_list) {
		CHECK_OBJ(cw, CONNWAIT_MAGIC);
		assert(cw->cw_state == CW_QUEUED);
		/* Each waiter sleeps on its own condvar, signal them all */
		PTOK(pthread_cond_signal(&cw->cw_cond));
	}
	Lck_Unlock(bp->director->mtx);
}
156 |
|
|
157 |
|
/*
 * Signal the head of the wait queue (if any) that a connection slot
 * may have become available.  Caller must hold the director mutex.
 * Only the first waiter is woken; it re-checks BE_BUSY() itself when
 * it resumes in vbe_dir_getfd().
 */
static void
vbe_connwait_signal_locked(const struct backend *bp)
{
	struct connwait *cw;

	Lck_AssertHeld(bp->director->mtx);

	if (bp->n_conn < bp->max_connections) {
		cw = VTAILQ_FIRST(&bp->cw_head);
		if (cw != NULL) {
			CHECK_OBJ(cw, CONNWAIT_MAGIC);
			assert(cw->cw_state == CW_QUEUED);
			PTOK(pthread_cond_signal(&cw->cw_cond));
		}
	}
}
173 |
|
|
174 |
|
/*
 * Release a (stack-allocated) connwait entry.  The entry must already
 * be off the backend's wait queue (state != CW_QUEUED) before its
 * condvar may be destroyed.
 */
static void
vbe_connwait_fini(struct connwait *cw)
{
	CHECK_OBJ_NOTNULL(cw, CONNWAIT_MAGIC);
	assert(cw->cw_state != CW_QUEUED);
	PTOK(pthread_cond_destroy(&cw->cw_cond));
	FINI_OBJ(cw);
}
182 |
|
|
183 |
|
/*-------------------------------------------------------------------- |
184 |
|
* Get a connection to the backend |
185 |
|
* |
186 |
|
* note: wrk is a separate argument because it differs for pipe vs. fetch |
187 |
|
*/ |
188 |
|
|
189 |
|
/*
 * Get a connection to the backend: health check, optional wait for a
 * free slot under max_connections, workspace allocation for the htc,
 * connection-pool lookup and optional PROXY preamble.
 *
 * Returns the pooled fd wrapper, or NULL with a FetchError logged and
 * the relevant counters bumped.  On success bo->htc is initialized
 * and owns the pfd (via htc->priv) until vbe_dir_finish().
 *
 * note: wrk is a separate argument because it differs for pipe vs. fetch
 */
static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
	struct busyobj *bo;
	struct pfd *pfd;
	int *fdp, err;
	vtim_dur tmod;
	char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
	unsigned wait_limit;
	vtim_dur wait_tmod;
	vtim_dur wait_end;
	struct connwait cw[1];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
	AN(bp->vsc);

	if (!VRT_Healthy(ctx, dir, NULL)) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: unhealthy", VRT_BACKEND_string(dir));
		bp->vsc->unhealthy++;
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}
	INIT_OBJ(cw, CONNWAIT_MAGIC);
	PTOK(pthread_cond_init(&cw->cw_cond, NULL));
	Lck_Lock(bp->director->mtx);
	FIND_BE_PARAM(backend_wait_limit, wait_limit, bp);
	FIND_BE_TMO(backend_wait_timeout, wait_tmod, bp);
	cw->cw_state = CW_DO_CONNECT;
	/* A non-empty queue means earlier waiters have priority */
	if (!VTAILQ_EMPTY(&bp->cw_head) || BE_BUSY(bp))
		cw->cw_state = CW_BE_BUSY;

	/* Queue up and wait for a slot, if configuration allows */
	if (cw->cw_state == CW_BE_BUSY && wait_limit > 0 &&
	    wait_tmod > 0.0 && bp->cw_count < wait_limit) {
		VTAILQ_INSERT_TAIL(&bp->cw_head, cw, cw_list);
		bp->cw_count++;
		VSC_C_main->backend_wait++;
		cw->cw_state = CW_QUEUED;
		wait_end = VTIM_real() + wait_tmod;
		do {
			err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
			    wait_end);
		} while (err == EINTR);
		assert(cw->cw_state == CW_QUEUED);
		VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
		cw->cw_state = CW_DEQUEUED;
		bp->cw_count--;
		/* Timed out while still busy, or went sick while waiting */
		if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
			VSC_C_main->backend_wait_fail++;
			cw->cw_state = CW_BE_BUSY;
		}
	}
	/* Claim the slot before dropping the lock */
	if (cw->cw_state != CW_BE_BUSY)
		bp->n_conn++;
	Lck_Unlock(bp->director->mtx);

	if (cw->cw_state == CW_BE_BUSY) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: busy", VRT_BACKEND_string(dir));
		bp->vsc->busy++;
		VSC_C_main->backend_busy++;
		vbe_connwait_fini(cw);
		return (NULL);
	}

	AZ(bo->htc);
	bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
	/* XXX: we may want to detect the ws overflow sooner */
	if (bo->htc == NULL) {
		VSLb(bo->vsl, SLT_FetchError, "out of workspace");
		/* XXX: counter ? */
		/* Give the slot back and pass it on to a waiter */
		Lck_Lock(bp->director->mtx);
		bp->n_conn--;
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		vbe_connwait_fini(cw);
		return (NULL);
	}
	bo->htc->doclose = SC_NULL;
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	FIND_TMO(connect_timeout, tmod, bo, bp);
	pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
	if (pfd == NULL) {
		Lck_Lock(bp->director->mtx);
		VBE_Connect_Error(bp->vsc, err);
		bp->n_conn--;
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: fail errno %d (%s)",
		    VRT_BACKEND_string(dir), err, VAS_errtxt(err));
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		vbe_connwait_fini(cw);
		return (NULL);
	}

	VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
	fdp = PFD_Fd(pfd);
	AN(fdp);
	assert(*fdp >= 0);

	Lck_Lock(bp->director->mtx);
	bp->vsc->conn++;
	bp->vsc->req++;
	Lck_Unlock(bp->director->mtx);

	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	/* Send the PROXY preamble first, when configured */
	err = 0;
	if (bp->proxy_header != 0)
		err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
	if (err < 0) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: proxy write errno %d (%s)",
		    VRT_BACKEND_string(dir),
		    errno, VAS_errtxt(errno));
		// account as if connect failed - good idea?
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		VCP_Close(&pfd);
		AZ(pfd);
		/* Roll back everything claimed above */
		Lck_Lock(bp->director->mtx);
		bp->n_conn--;
		bp->vsc->conn--;
		bp->vsc->req--;
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		vbe_connwait_fini(cw);
		return (NULL);
	}
	/* err is now the number of PROXY preamble bytes written (or 0) */
	bo->acct.bereq_hdrbytes += err;

	PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
	PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
	VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s %s",
	    *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
	    PFD_State(pfd) == PFD_STATE_STOLEN ? "reuse" : "connect");

	INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
	bo->htc->priv = pfd;
	bo->htc->rfd = fdp;
	bo->htc->doclose = SC_NULL;
	FIND_TMO(first_byte_timeout,
	    bo->htc->first_byte_timeout, bo, bp);
	FIND_TMO(between_bytes_timeout,
	    bo->htc->between_bytes_timeout, bo, bp);
	vbe_connwait_fini(cw);
	return (pfd);
}
345 |
|
|
346 |
|
/*
 * Return the backend connection after a fetch: close it when the
 * transaction demanded so (or a PROXY preamble makes reuse unsafe),
 * otherwise recycle it into the pool.  Also flushes the busyobj's
 * bereq accounting into the backend counters and releases the
 * max_connections slot taken in vbe_dir_getfd().
 */
static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
	struct backend *bp;
	struct busyobj *bo;
	struct pfd *pfd;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	pfd = bo->htc->priv;
	bo->htc->priv = NULL;
	/* proxy_header connections carry a per-session preamble, never reuse */
	if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
		VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d), bo->htc->doclose->name);
		VCP_Close(&pfd);
		AZ(pfd);
		Lck_Lock(bp->director->mtx);
	} else {
		assert (PFD_State(pfd) == PFD_STATE_USED);
		VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d));
		Lck_Lock(bp->director->mtx);
		VSC_C_main->backend_recycle++;
		VCP_Recycle(bo->wrk, &pfd);
	}
	/* Both branches above leave the director mutex held */
	assert(bp->n_conn > 0);
	bp->n_conn--;
	AN(bp->vsc);
	bp->vsc->conn--;
	/* Add this fetch's bereq accounting to the backend's totals */
#define ACCT(foo)	bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
	vbe_connwait_signal_locked(bp);
	Lck_Unlock(bp->director->mtx);
	bo->htc = NULL;
}
388 |
|
|
389 |
|
/*
 * Send the bereq and fetch the beresp headers.  Returns 0 on success
 * with bo->htc holding the open connection, -1 on failure.
 *
 * 'extrachance' implements a single automatic retry: it stays 1 only
 * while the first attempt ran on a recycled (stolen) connection,
 * which the backend may legitimately have closed under us.
 */
static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
	int i, extrachance = 1;
	struct backend *bp;
	struct pfd *pfd;
	struct busyobj *bo;
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
	wrk = ctx->bo->wrk;
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	/*
	 * Now that we know our backend, we can set a default Host:
	 * header if one is necessary.  This cannot be done in the VCL
	 * because the backend may be chosen by a director.
	 */
	if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
		http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

	do {
		if (bo->htc != NULL)
			CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* On the retry, force a fresh connection */
		pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
		if (pfd == NULL)
			return (-1);
		AN(bo->htc);
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* Only a recycled connection earns the extra chance */
		if (PFD_State(pfd) != PFD_STATE_STOLEN)
			extrachance = 0;

		i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
		    &bo->acct.bereq_bodybytes);

		if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
			if (VCP_Wait(wrk, pfd, VTIM_real() +
			    bo->htc->first_byte_timeout) != 0) {
				bo->htc->doclose = SC_RX_TIMEOUT;
				VSLb(bo->vsl, SLT_FetchError,
				    "first byte timeout (reused connection)");
				extrachance = 0;
			}
		}

		if (bo->htc->doclose == SC_NULL) {
			assert(PFD_State(pfd) == PFD_STATE_USED);
			if (i == 0)
				i = V1F_FetchRespHdr(bo);
			if (i == 0) {
				AN(bo->htc->priv);
				http_VSL_log(bo->beresp);
				return (0);
			}
		}
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

		/*
		 * If we recycled a backend connection, there is a finite chance
		 * that the backend closed it before we got the bereq to it.
		 * In that case do a single automatic retry if req.body allows.
		 */
		vbe_dir_finish(ctx, d);
		AZ(bo->htc);
		if (i < 0 || extrachance == 0)
			break;
		if (bo->no_retry != NULL)
			break;
		VSC_C_main->backend_retry++;
	} while (extrachance--);
	return (-1);
}
468 |
|
|
469 |
|
static VCL_IP v_matchproto_(vdi_getip_f) |
470 |
40 |
vbe_dir_getip(VRT_CTX, VCL_BACKEND d) |
471 |
|
{ |
472 |
|
struct pfd *pfd; |
473 |
|
|
474 |
40 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
475 |
40 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
476 |
40 |
CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC); |
477 |
40 |
CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC); |
478 |
40 |
pfd = ctx->bo->htc->priv; |
479 |
|
|
480 |
40 |
return (VCP_GetIp(pfd)); |
481 |
|
} |
482 |
|
|
483 |
|
/*--------------------------------------------------------------------*/ |
484 |
|
|
485 |
|
/*
 * Set up and run pipe mode: connect to the backend, replay the client
 * request, then shuffle bytes both ways until one side closes or the
 * pipe task deadline expires.  Returns the stream-close reason.
 */
static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
	int i;
	stream_close_t retval;
	struct backend *bp;
	struct v1p_acct v1a;
	struct pfd *pfd;
	vtim_real deadline;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	memset(&v1a, 0, sizeof v1a);

	/* This is hackish... */
	/* Move the request-header byte count into the pipe accounting */
	v1a.req = ctx->req->acct.req_hdrbytes;
	ctx->req->acct.req_hdrbytes = 0;

	ctx->req->res_mode = RES_PIPE;

	retval = SC_TX_ERROR;
	pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

	if (pfd != NULL) {
		CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
		i = V1F_SendReq(ctx->req->wrk, ctx->bo,
		    &v1a.bereq, &v1a.out);
		VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
		if (i == 0) {
			deadline = ctx->bo->task_deadline;
			if (isnan(deadline))
				deadline = cache_param->pipe_task_deadline;
			/* Deadline is relative to session idle time; 0 disables */
			if (deadline > 0.)
				deadline += ctx->req->sp->t_idle;
			retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
			    deadline);
		}
		VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
		ctx->bo->htc->doclose = retval;
		vbe_dir_finish(ctx, d);
	}
	V1P_Charge(ctx->req, &v1a, bp->vsc);
	CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
	return (retval);
}
534 |
|
|
535 |
|
/*--------------------------------------------------------------------*/ |
536 |
|
|
537 |
|
/*
 * VCL / director lifecycle events: reveal or hide the stats segment
 * and start/stop the probe on warm/cold, free on discard, and wake
 * queued waiters when the backend is (administratively) sick.
 */
static void
vbe_dir_event(const struct director *d, enum vcl_event_e ev)
{
	struct backend *bp;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	if (ev == VCL_EVENT_WARM) {
		VRT_VSC_Reveal(bp->vsc_seg);
		if (bp->probe != NULL)
			VBP_Control(bp, 1);
	} else if (ev == VCL_EVENT_COLD) {
		if (bp->probe != NULL)
			VBP_Control(bp, 0);
		VRT_VSC_Hide(bp->vsc_seg);
	} else if (ev == VCL_EVENT_DISCARD) {
		VRT_DelDirector(&bp->director);
	} else if (ev == VDI_EVENT_SICK) {
		const struct vdi_ahealth *ah = d->vdir->admin_health;

		/* Sick for the waiters' purposes: forced, or auto + probe-sick */
		if (ah == VDI_AH_SICK || (ah == VDI_AH_AUTO && bp->sick))
			vbe_connwait_broadcast(bp);
	}
}
562 |
|
|
563 |
|
/*---------------------------------------------------------------------*/ |
564 |
|
|
565 |
|
/*
 * Free a backend instance: remove the probe, destroy the stats
 * segment, drop the global backend count, release the connection
 * pool and free every string the backend owns.
 */
static void
vbe_free(struct backend *be)
{

	CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

	if (be->probe != NULL)
		VBP_Remove(be);

	VSC_vbe_Destroy(&be->vsc_seg);
	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend--;
	Lck_Unlock(&backends_mtx);
	VCP_Rel(&be->conn_pool);

	/* Free the DA (string) fields; DN (value) fields need no cleanup */
#define DA(x)	do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)	/**/
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN
	free(be->endpoint);

	/* No thread may still be queued for this backend */
	assert(VTAILQ_EMPTY(&be->cw_head));
	FREE_OBJ(be);
}
590 |
|
|
591 |
|
static void v_matchproto_(vdi_destroy_f) |
592 |
3225 |
vbe_destroy(const struct director *d) |
593 |
|
{ |
594 |
|
struct backend *be; |
595 |
|
|
596 |
3225 |
CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC); |
597 |
3225 |
vbe_free(be); |
598 |
3225 |
} |
599 |
|
|
600 |
|
/*--------------------------------------------------------------------*/ |
601 |
|
|
602 |
|
/*
 * Panic-report contribution for a backend director: dump the
 * connection pool state, the Host header and the open connection
 * count into the panic buffer.
 */
static void
vbe_panic(const struct director *d, struct vsb *vsb)
{
	struct backend *bp;

	PAN_CheckMagic(vsb, d, DIRECTOR_MAGIC);
	bp = d->priv;
	PAN_CheckMagic(vsb, bp, BACKEND_MAGIC);

	VCP_Panic(vsb, bp->conn_pool);
	VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr);
	VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn);
}
615 |
|
|
616 |
|
/*-------------------------------------------------------------------- |
617 |
|
*/ |
618 |
|
|
619 |
|
/*
 * CLI "backend.list" output for one backend, in plain or JSON form
 * (jflag) with optional probe detail (pflag).  Probeless backends
 * report a constant "healthy" placeholder.
 */
static void v_matchproto_(vdi_list_f)
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag,
    int jflag)
{
	char buf[VTCP_ADDRBUFSIZE];
	struct backend *bp;
	struct vrt_endpoint *vep;

	(void)ctx;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
	CHECK_OBJ_NOTNULL(bp->endpoint, VRT_ENDPOINT_MAGIC);

	vep = bp->endpoint;

	if (bp->probe != NULL)
		VBP_Status(vsb, bp, pflag, jflag);
	else if (jflag && pflag)
		VSB_cat(vsb, "{},\n");
	else if (jflag)
		VSB_cat(vsb, "[0, 0, \"healthy\"]");
	else if (pflag)
		/* plain verbose listing has nothing to add without a probe */
		return;
	else
		VSB_cat(vsb, "0/0\thealthy");

	/* Verbose JSON additionally reports the endpoint addresses */
	if (jflag && pflag) {
		if (vep->ipv4 != NULL) {
			VTCP_name(vep->ipv4, buf, sizeof buf, NULL, 0);
			VSB_printf(vsb, "\"ipv4\": \"%s\",\n", buf);
		}
		if (vep->ipv6 != NULL) {
			VTCP_name(vep->ipv6, buf, sizeof buf, NULL, 0);
			VSB_printf(vsb, "\"ipv6\": \"%s\",\n", buf);
		}
	}
}
657 |
|
|
658 |
|
/*-------------------------------------------------------------------- |
659 |
|
*/ |
660 |
|
|
661 |
|
static VCL_BOOL v_matchproto_(vdi_healthy_f) |
662 |
2680 |
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t) |
663 |
|
{ |
664 |
|
struct backend *bp; |
665 |
|
|
666 |
2680 |
(void)ctx; |
667 |
2680 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
668 |
2680 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
669 |
|
|
670 |
2680 |
if (t != NULL) |
671 |
1680 |
*t = bp->changed; |
672 |
|
|
673 |
2680 |
return (!bp->sick); |
674 |
|
} |
675 |
|
|
676 |
|
/*-------------------------------------------------------------------- |
677 |
|
*/ |
678 |
|
|
679 |
|
/* Director method table for backends with a health probe. */
static const struct vdi_methods vbe_methods[1] = {{
	.magic =	VDI_METHODS_MAGIC,
	.type =		"backend",
	.http1pipe =	vbe_dir_http1pipe,
	.gethdrs =	vbe_dir_gethdrs,
	.getip =	vbe_dir_getip,
	.finish =	vbe_dir_finish,
	.event =	vbe_dir_event,
	.destroy =	vbe_destroy,
	.panic =	vbe_panic,
	.list =		vbe_list,
	.healthy =	vbe_healthy
}};

/*
 * Same table without .healthy: probeless backends are always
 * considered healthy (see VRT_new_backend_clustered()).
 */
static const struct vdi_methods vbe_methods_noprobe[1] = {{
	.magic =	VDI_METHODS_MAGIC,
	.type =		"backend",
	.http1pipe =	vbe_dir_http1pipe,
	.gethdrs =	vbe_dir_gethdrs,
	.getip =	vbe_dir_getip,
	.finish =	vbe_dir_finish,
	.event =	vbe_dir_event,
	.destroy =	vbe_destroy,
	.panic =	vbe_panic,
	.list =		vbe_list
}};
705 |
|
|
706 |
|
/*-------------------------------------------------------------------- |
707 |
|
* Create a new static or dynamic director::backend instance. |
708 |
|
*/ |
709 |
|
|
710 |
|
/* VSM space needed per backend: one VSC_vbe segment plus overhead. */
size_t
VRT_backend_vsm_need(VRT_CTX)
{
	(void)ctx;
	return (VRT_VSC_Overhead(VSC_vbe_size));
}
716 |
|
|
717 |
|
/* |
718 |
|
* The new_backend via parameter is a VCL_BACKEND, but we need a (struct |
719 |
|
* backend) |
720 |
|
* |
721 |
|
* For now, we resolve it when creating the backend, which implies no redundancy |
722 |
|
* / load balancing across the via director if it is more than a simple backend. |
723 |
|
*/ |
724 |
|
|
725 |
|
/*
 * The new_backend via parameter is a VCL_BACKEND, but we need a (struct
 * backend)
 *
 * For now, we resolve it when creating the backend, which implies no redundancy
 * / load balancing across the via director if it is more than a simple backend.
 *
 * Returns NULL (after VRT_fail()) when via cannot be used: UDS
 * endpoint, resolution failure, or the resolved director is not a
 * plain backend.
 */
static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

	if (vep->uds_path) {
		VRT_fail(ctx, "Via is only supported for IP addresses");
		return (NULL);
	}

	via = VRT_DirectorResolve(ctx, via);

	if (via == NULL) {
		VRT_fail(ctx, "Via resolution failed");
		return (NULL);
	}

	CHECK_OBJ(via, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

	/* Only a real backend (either method table) can serve as via */
	if (via->vdir->methods == vbe_methods ||
	    via->vdir->methods == vbe_methods_noprobe)
		CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

	if (viabe == NULL)
		VRT_fail(ctx, "Via does not resolve to a backend");

	return (viabe);
}
757 |
|
|
758 |
|
/* |
759 |
|
* construct a new endpoint identical to vep with sa in a proxy header |
760 |
|
*/ |
761 |
|
/*
 * construct a new endpoint identical to vep with sa in a proxy header
 *
 * The PROXYv2 preamble is formatted into a temporary VSB; the second
 * VRT_Endpoint_Clone() deep-copies it, so the VSB and the first clone
 * can be released before returning.  Caller owns (and frees) the
 * returned endpoint.
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
	struct vsb *preamble;
	struct vrt_blob blob[1];
	struct vrt_endpoint *nvep, *ret;
	const struct suckaddr *client_bogo;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	AN(sa);

	nvep = VRT_Endpoint_Clone(vep);
	CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

	/* Match the placeholder client address family to the server's */
	if (VSA_Get_Proto(sa) == AF_INET6)
		client_bogo = bogo_ip6;
	else
		client_bogo = bogo_ip;

	preamble = VSB_new_auto();
	AN(preamble);
	VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
	blob->blob = VSB_data(preamble);
	blob->len = VSB_len(preamble);
	nvep->preamble = blob;
	ret = VRT_Endpoint_Clone(nvep);
	CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
	VSB_destroy(&preamble);
	FREE_OBJ(nvep);

	return (ret);
}
794 |
|
|
795 |
|
/*
 * Create a new static or dynamic director::backend instance.
 *
 * Copies the vrt_backend description into a private struct backend,
 * optionally routes it through a 'via' backend (PROXY preamble),
 * sets up stats, connection pool and probe, and registers the
 * director.  Returns NULL on failure (after VRT_fail() where a
 * reason can be given).
 */
VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
	struct backend *be;
	struct vcl *vcl;
	const struct vrt_backend_probe *vbp;
	const struct vrt_endpoint *vep;
	const struct vdi_methods *m;
	const struct suckaddr *sa = NULL;
	char abuf[VTCP_ADDRBUFSIZE];
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
	vep = vrt->endpoint;
	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	/* An endpoint is either a UDS path or at least one IP, never both */
	if (vep->uds_path == NULL) {
		if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
			VRT_fail(ctx, "%s: Illegal IP", __func__);
			return (NULL);
		}
	} else {
		assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
	}

	if (via != NULL) {
		viabe = via_resolve(ctx, vep, via);
		if (viabe == NULL)
			return (NULL);
	}

	vcl = ctx->vcl;
	AN(vcl);
	AN(vrt->vcl_name);

	/* Create new backend */
	ALLOC_OBJ(be, BACKEND_MAGIC);
	if (be == NULL)
		return (NULL);
	VTAILQ_INIT(&be->cw_head);

	/* Copy all fields: DA duplicates strings, DN copies values */
#define DA(x)	do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)	do { be->x = vrt->x; } while (0)
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN

	/* Inherit unset timeouts from the via backend */
#define CPTMO(a, b, x)	do {					\
		if ((a)->x < 0.0 || isnan((a)->x))		\
			(a)->x = (b)->x;			\
	} while(0)

	if (viabe != NULL) {
		CPTMO(be, viabe, connect_timeout);
		CPTMO(be, viabe, first_byte_timeout);
		CPTMO(be, viabe, between_bytes_timeout);
	}
#undef CPTMO

	/* Pick the address used for the via preamble / default Host: */
	if (viabe || be->hosthdr == NULL) {
		if (vrt->endpoint->uds_path != NULL)
			sa = bogo_ip;
		else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
			sa = vep->ipv6;
		else if (vep->ipv4 != NULL)
			sa = vep->ipv4;
		else
			sa = vep->ipv6;
		if (be->hosthdr == NULL) {
			VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
			REPLACE(be->hosthdr, abuf);
		}
	}

	be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
	    "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
	AN(be->vsc);
	if (! vcl->temp->is_warm)
		VRT_VSC_Hide(be->vsc_seg);

	/* Via backends connect to the via endpoint with a PROXY preamble */
	if (viabe)
		vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
		    be->authority);
	else
		vep = be->endpoint = VRT_Endpoint_Clone(vep);

	AN(vep);
	be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
	AN(be->conn_pool);

	vbp = vrt->probe;
	if (vbp == NULL)
		vbp = VCL_DefaultProbe(vcl);

	if (vbp != NULL) {
		VBP_Insert(be, vbp, be->conn_pool);
		m = vbe_methods;
	} else {
		/* No probe: permanently healthy and happy */
		be->sick = 0;
		be->vsc->happy = UINT64_MAX;
		m = vbe_methods_noprobe;
	}

	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend++;
	Lck_Unlock(&backends_mtx);

	be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

	if (be->director == NULL) {
		vbe_free(be);
		return (NULL);
	}
	/* for cold VCL, update initial director state */
	if (be->probe != NULL)
		VBP_Update_Backend(be->probe);
	return (be->director);
}
914 |
|
|
915 |
|
/*
 * Convenience wrapper: create an unclustered backend (its VSC segment
 * gets its own VSM allocation rather than a shared cluster).
 */
VCL_BACKEND
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via)
{

	CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
	CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC);
	return (VRT_new_backend_clustered(ctx, NULL, vrt, via));
}
923 |
|
|
924 |
|
/*-------------------------------------------------------------------- |
925 |
|
* Delete a dynamic director::backend instance. Undeleted dynamic and |
926 |
|
* static instances are GC'ed when the VCL is discarded (in cache_vcl.c) |
927 |
|
*/ |
928 |
|
|
929 |
|
/*
 * Delete a dynamic director::backend instance. Undeleted dynamic and
 * static instances are GC'ed when the VCL is discarded (in cache_vcl.c)
 *
 * Disables the director and drops the caller's reference; *dp is
 * set to NULL.
 */
void
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp)
{

	(void)ctx;
	CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC);
	VRT_DisableDirector(*dp);
	VRT_Assign_Backend(dp, NULL);
}
938 |
|
|
939 |
|
/*---------------------------------------------------------------------*/ |
940 |
|
|
941 |
|
/* One-time initialization of the global backend-count mutex. */
void
VBE_InitCfg(void)
{

	Lck_New(&backends_mtx, lck_vbe);
}