| | varnish-cache/bin/varnishd/cache/cache_backend.c |
| 0 |
|
/*- |
| 1 |
|
* Copyright (c) 2006 Verdens Gang AS |
| 2 |
|
* Copyright (c) 2006-2015 Varnish Software AS |
| 3 |
|
* All rights reserved. |
| 4 |
|
* |
| 5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
| 6 |
|
* |
| 7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
| 8 |
|
* |
| 9 |
|
* Redistribution and use in source and binary forms, with or without |
| 10 |
|
* modification, are permitted provided that the following conditions |
| 11 |
|
* are met: |
| 12 |
|
* 1. Redistributions of source code must retain the above copyright |
| 13 |
|
* notice, this list of conditions and the following disclaimer. |
| 14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
| 15 |
|
* notice, this list of conditions and the following disclaimer in the |
| 16 |
|
* documentation and/or other materials provided with the distribution. |
| 17 |
|
* |
| 18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
| 22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 28 |
|
* SUCH DAMAGE. |
| 29 |
|
* |
| 30 |
|
* The director implementation for VCL backends. |
| 31 |
|
* |
| 32 |
|
*/ |
| 33 |
|
|
| 34 |
|
#include "config.h" |
| 35 |
|
|
| 36 |
|
#include <stdlib.h> |
| 37 |
|
|
| 38 |
|
#include "cache_varnishd.h" |
| 39 |
|
#include "cache_director.h" |
| 40 |
|
|
| 41 |
|
#include "vtcp.h" |
| 42 |
|
#include "vtim.h" |
| 43 |
|
#include "vsa.h" |
| 44 |
|
|
| 45 |
|
#include "cache_backend.h" |
| 46 |
|
#include "cache_conn_pool.h" |
| 47 |
|
#include "cache_transport.h" |
| 48 |
|
#include "cache_vcl.h" |
| 49 |
|
#include "http1/cache_http1.h" |
| 50 |
|
#include "proxy/cache_proxy.h" |
| 51 |
|
|
| 52 |
|
#include "VSC_vbe.h" |
| 53 |
|
|
| 54 |
|
/*--------------------------------------------------------------------*/ |
| 55 |
|
|
| 56 |
|
enum connwait_e { |
| 57 |
|
CW_DO_CONNECT = 1, |
| 58 |
|
CW_QUEUED, |
| 59 |
|
CW_DEQUEUED, |
| 60 |
|
CW_BE_BUSY, |
| 61 |
|
}; |
| 62 |
|
|
| 63 |
|
struct connwait { |
| 64 |
|
unsigned magic; |
| 65 |
|
#define CONNWAIT_MAGIC 0x75c7a52b |
| 66 |
|
enum connwait_e cw_state; |
| 67 |
|
VTAILQ_ENTRY(connwait) cw_list; |
| 68 |
|
pthread_cond_t cw_cond; |
| 69 |
|
}; |
| 70 |
|
|
| 71 |
|
static const char * const vbe_proto_ident = "HTTP Backend"; |
| 72 |
|
|
| 73 |
|
static struct lock backends_mtx; |
| 74 |
|
|
| 75 |
|
/*--------------------------------------------------------------------*/ |
| 76 |
|
|
| 77 |
|
void |
| 78 |
374 |
VBE_Connect_Error(struct VSC_vbe *vsc, int err) |
| 79 |
|
{ |
| 80 |
|
|
| 81 |
374 |
switch(err) { |
| 82 |
|
case 0: |
| 83 |
|
/* |
| 84 |
|
* This is kind of brittle, but zero is the only |
| 85 |
|
* value of errno we can trust to have no meaning. |
| 86 |
|
*/ |
| 87 |
160 |
vsc->helddown++; |
| 88 |
160 |
break; |
| 89 |
|
case EACCES: |
| 90 |
|
case EPERM: |
| 91 |
0 |
vsc->fail_eacces++; |
| 92 |
0 |
break; |
| 93 |
|
case EADDRNOTAVAIL: |
| 94 |
0 |
vsc->fail_eaddrnotavail++; |
| 95 |
0 |
break; |
| 96 |
|
case ECONNREFUSED: |
| 97 |
214 |
vsc->fail_econnrefused++; |
| 98 |
214 |
break; |
| 99 |
|
case ENETUNREACH: |
| 100 |
0 |
vsc->fail_enetunreach++; |
| 101 |
0 |
break; |
| 102 |
|
case ETIMEDOUT: |
| 103 |
0 |
vsc->fail_etimedout++; |
| 104 |
0 |
break; |
| 105 |
|
default: |
| 106 |
0 |
vsc->fail_other++; |
| 107 |
0 |
} |
| 108 |
374 |
} |
| 109 |
|
|
| 110 |
|
/*--------------------------------------------------------------------*/ |
| 111 |
|
|
| 112 |
|
#define FIND_TMO(tmx, dst, bo, be) \ |
| 113 |
|
do { \ |
| 114 |
|
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); \ |
| 115 |
|
dst = bo->tmx; \ |
| 116 |
|
if (isnan(dst) && be->tmx >= 0.0) \ |
| 117 |
|
dst = be->tmx; \ |
| 118 |
|
if (isnan(dst)) \ |
| 119 |
|
dst = cache_param->tmx; \ |
| 120 |
|
} while (0) |
| 121 |
|
|
| 122 |
|
#define FIND_BE_SPEC(tmx, dst, be, def) \ |
| 123 |
|
do { \ |
| 124 |
|
CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC); \ |
| 125 |
|
dst = be->tmx; \ |
| 126 |
|
if (dst == def) \ |
| 127 |
|
dst = cache_param->tmx; \ |
| 128 |
|
} while (0) |
| 129 |
|
|
| 130 |
|
#define FIND_BE_PARAM(tmx, dst, be) \ |
| 131 |
|
FIND_BE_SPEC(tmx, dst, be, 0) |
| 132 |
|
|
| 133 |
|
#define FIND_BE_TMO(tmx, dst, be) \ |
| 134 |
|
FIND_BE_SPEC(tmx, dst, be, -1.0) |
| 135 |
|
|
| 136 |
|
#define BE_BUSY(be) \ |
| 137 |
|
(be->max_connections > 0 && be->n_conn >= be->max_connections) |
| 138 |
|
|
| 139 |
|
/*--------------------------------------------------------------------*/ |
| 140 |
|
|
| 141 |
|
static void |
| 142 |
569 |
vbe_connwait_broadcast(const struct backend *bp) |
| 143 |
|
{ |
| 144 |
|
struct connwait *cw; |
| 145 |
|
|
| 146 |
569 |
CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC); |
| 147 |
|
|
| 148 |
569 |
Lck_Lock(bp->director->mtx); |
| 149 |
641 |
VTAILQ_FOREACH(cw, &bp->cw_head, cw_list) { |
| 150 |
72 |
CHECK_OBJ(cw, CONNWAIT_MAGIC); |
| 151 |
72 |
assert(cw->cw_state == CW_QUEUED); |
| 152 |
72 |
PTOK(pthread_cond_signal(&cw->cw_cond)); |
| 153 |
72 |
} |
| 154 |
569 |
Lck_Unlock(bp->director->mtx); |
| 155 |
569 |
} |
| 156 |
|
|
| 157 |
|
static void |
| 158 |
17919 |
vbe_connwait_signal_locked(const struct backend *bp) |
| 159 |
|
{ |
| 160 |
|
struct connwait *cw; |
| 161 |
|
|
| 162 |
17919 |
Lck_AssertHeld(bp->director->mtx); |
| 163 |
|
|
| 164 |
17919 |
if (bp->n_conn < bp->max_connections) { |
| 165 |
88 |
cw = VTAILQ_FIRST(&bp->cw_head); |
| 166 |
88 |
if (cw != NULL) { |
| 167 |
8 |
CHECK_OBJ(cw, CONNWAIT_MAGIC); |
| 168 |
8 |
assert(cw->cw_state == CW_QUEUED); |
| 169 |
8 |
PTOK(pthread_cond_signal(&cw->cw_cond)); |
| 170 |
8 |
} |
| 171 |
88 |
} |
| 172 |
17919 |
} |
| 173 |
|
|
| 174 |
|
/*
 * Tear down a stack-allocated connwait: destroy its condvar and clear
 * the magic.  The entry must already be off the wait queue (state is
 * asserted != CW_QUEUED) — destroying the condvar of a queued entry
 * would race vbe_connwait_signal_locked()/_broadcast().
 */
static void
vbe_connwait_fini(struct connwait *cw)
{
	CHECK_OBJ_NOTNULL(cw, CONNWAIT_MAGIC);
	assert(cw->cw_state != CW_QUEUED);
	PTOK(pthread_cond_destroy(&cw->cw_cond));
	FINI_OBJ(cw);
}
| 182 |
|
|
| 183 |
|
/*-------------------------------------------------------------------- |
| 184 |
|
* Get a connection to the backend |
| 185 |
|
* |
| 186 |
|
* note: wrk is a separate argument because it differs for pipe vs. fetch |
| 187 |
|
*/ |
| 188 |
|
|
| 189 |
|
/*
 * Acquire a (possibly recycled) backend connection for bo.
 *
 * Sequence: health check, optional queueing behind max_connections
 * (bounded by backend_wait_limit / backend_wait_timeout), workspace
 * allocation for bo->htc, VCP_Get() from the pool, optional PROXY
 * preamble, then htc initialization with the effective timeouts.
 *
 * Returns the pfd on success, NULL on any failure (each failure path
 * logs a FetchError, fixes up n_conn/wait accounting and finalizes cw).
 *
 * wrk is a separate argument because it differs for pipe vs. fetch.
 */
static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
	struct busyobj *bo;
	struct pfd *pfd;
	int *fdp, err;
	vtim_dur tmod;
	char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
	unsigned wait_limit;
	vtim_dur wait_tmod;
	vtim_dur wait_end;
	struct connwait cw[1];		/* stack-allocated wait-queue entry */

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
	AN(bp->vsc);

	/* Refuse unhealthy backends outright. */
	if (!VRT_Healthy(ctx, dir, NULL)) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: unhealthy", VRT_BACKEND_string(dir));
		bp->vsc->unhealthy++;
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}
	INIT_OBJ(cw, CONNWAIT_MAGIC);
	PTOK(pthread_cond_init(&cw->cw_cond, NULL));
	Lck_Lock(bp->director->mtx);
	FIND_BE_PARAM(backend_wait_limit, wait_limit, bp);
	FIND_BE_TMO(backend_wait_timeout, wait_tmod, bp);
	cw->cw_state = CW_DO_CONNECT;
	/* A non-empty queue means others wait ahead of us: treat as busy. */
	if (!VTAILQ_EMPTY(&bp->cw_head) || BE_BUSY(bp))
		cw->cw_state = CW_BE_BUSY;

	/* Queue up (if waiting is enabled and there is room) and sleep. */
	if (cw->cw_state == CW_BE_BUSY && wait_limit > 0 &&
	    wait_tmod > 0.0 && bp->cw_count < wait_limit) {
		VTAILQ_INSERT_TAIL(&bp->cw_head, cw, cw_list);
		bp->cw_count++;
		VSC_C_main->backend_wait++;
		cw->cw_state = CW_QUEUED;
		wait_end = VTIM_real() + wait_tmod;
		do {
			err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
			    wait_end);
		} while (err == EINTR);
		assert(cw->cw_state == CW_QUEUED);
		VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
		cw->cw_state = CW_DEQUEUED;
		bp->cw_count--;
		/* Timed out while still busy, or went sick meanwhile: fail. */
		if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
			VSC_C_main->backend_wait_fail++;
			cw->cw_state = CW_BE_BUSY;
		}
	}
	if (cw->cw_state != CW_BE_BUSY)
		bp->n_conn++;	/* claim our slot while still locked */

	if (!VTAILQ_EMPTY(&bp->cw_head) && !BE_BUSY(bp)) {
		/* Signal the new head of the waiting queue */
		vbe_connwait_signal_locked(bp);
	}

	Lck_Unlock(bp->director->mtx);

	if (cw->cw_state == CW_BE_BUSY) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: busy", VRT_BACKEND_string(dir));
		bp->vsc->busy++;
		VSC_C_main->backend_busy++;
		vbe_connwait_fini(cw);
		return (NULL);
	}

	AZ(bo->htc);
	bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
	/* XXX: we may want to detect the ws overflow sooner */
	if (bo->htc == NULL) {
		VSLb(bo->vsl, SLT_FetchError, "out of workspace");
		/* XXX: counter ? */
		Lck_Lock(bp->director->mtx);
		bp->n_conn--;	/* give the slot back */
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		vbe_connwait_fini(cw);
		return (NULL);
	}
	bo->htc->doclose = SC_NULL;
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	FIND_TMO(connect_timeout, tmod, bo, bp);
	pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
	if (pfd == NULL) {
		Lck_Lock(bp->director->mtx);
		VBE_Connect_Error(bp->vsc, err);
		bp->n_conn--;
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: fail errno %d (%s)",
		    VRT_BACKEND_string(dir), err, VAS_errtxt(err));
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		vbe_connwait_fini(cw);
		return (NULL);
	}

	VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
	fdp = PFD_Fd(pfd);
	AN(fdp);
	assert(*fdp >= 0);

	Lck_Lock(bp->director->mtx);
	bp->vsc->conn++;
	bp->vsc->req++;
	Lck_Unlock(bp->director->mtx);

	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	/* Send the PROXY preamble first, if configured for this backend. */
	err = 0;
	if (bp->proxy_header != 0)
		err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
	if (err < 0) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: proxy write errno %d (%s)",
		    VRT_BACKEND_string(dir),
		    errno, VAS_errtxt(errno));
		// account as if connect failed - good idea?
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		VCP_Close(&pfd);
		AZ(pfd);
		Lck_Lock(bp->director->mtx);
		bp->n_conn--;
		bp->vsc->conn--;
		bp->vsc->req--;
		vbe_connwait_signal_locked(bp);
		Lck_Unlock(bp->director->mtx);
		vbe_connwait_fini(cw);
		return (NULL);
	}
	bo->acct.bereq_hdrbytes += err;	/* PROXY bytes count as bereq hdr */

	PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
	PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
	if (PFD_State(pfd) != PFD_STATE_STOLEN) {
		VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s connect",
		    *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1);
	} else {
		/* Stolen == reused from the pool; log age and reuse count. */
		VSLb(bo->vsl, SLT_BackendOpen,
		    "%d %s %s %s %s %s reuse %.6f %ju", *fdp,
		    VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
		    PFD_Age(pfd), (uintmax_t)PFD_Reused(pfd));
	}

	INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
	bo->htc->priv = pfd;
	bo->htc->rfd = fdp;
	bo->htc->doclose = SC_NULL;
	FIND_TMO(first_byte_timeout,
	    bo->htc->first_byte_timeout, bo, bp);
	FIND_TMO(between_bytes_timeout,
	    bo->htc->between_bytes_timeout, bo, bp);
	vbe_connwait_fini(cw);
	return (pfd);
}
| 357 |
|
|
| 358 |
|
/*
 * Release the backend connection held by bo: close it if a close reason
 * was recorded (or PROXY is in use, which makes reuse unsafe), otherwise
 * recycle it into the pool.  Then, under the director lock, drop the
 * n_conn slot, roll the bereq accounting into the backend's VSC and wake
 * a queued waiter if one exists.
 */
static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
	struct backend *bp;
	struct busyobj *bo;
	struct pfd *pfd;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	pfd = bo->htc->priv;
	bo->htc->priv = NULL;
	if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
		VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d), bo->htc->doclose->name);
		VCP_Close(&pfd);
		AZ(pfd);
		/* Lock taken here so both branches exit holding it. */
		Lck_Lock(bp->director->mtx);
	} else {
		assert (PFD_State(pfd) == PFD_STATE_USED);
		VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d));
		Lck_Lock(bp->director->mtx);
		VSC_C_main->backend_recycle++;
		VCP_Recycle(bo->wrk, &pfd);
	}
	assert(bp->n_conn > 0);
	bp->n_conn--;
	AN(bp->vsc);
	bp->vsc->conn--;
	/* Fold this fetch's byte accounting into the backend counters. */
#define ACCT(foo)	bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
	vbe_connwait_signal_locked(bp);
	Lck_Unlock(bp->director->mtx);
	bo->htc = NULL;
}
| 400 |
|
|
| 401 |
|
/*
 * Send the bereq and fetch the beresp headers for bo on this backend.
 *
 * Returns 0 on success (beresp headers parsed), -1 on failure.  If a
 * reused (stolen) pool connection turns out to have been closed by the
 * backend, one automatic retry on a fresh connection is attempted,
 * unless the request body has already been consumed (bo->no_retry).
 */
static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
	int i, retry_connect = 1;
	struct backend *bp;
	struct pfd *pfd;
	struct busyobj *bo;
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
	wrk = ctx->bo->wrk;
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	/*
	 * Now that we know our backend, we can set a default Host:
	 * header if one is necessary. This cannot be done in the VCL
	 * because the backend may be chosen by a director.
	 */
	if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
		http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

	do {
		if (bo->htc != NULL)
			CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* On the retry pass (retry_connect == 0) force a fresh fd. */
		pfd = vbe_dir_getfd(ctx, wrk, d, bp, retry_connect == 0 ? 1 : 0);
		if (pfd == NULL)
			return (-1);
		AN(bo->htc);
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* A brand-new connection cannot have been closed under us. */
		if (PFD_State(pfd) != PFD_STATE_STOLEN)
			retry_connect = 0;

		i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
		    &bo->acct.bereq_bodybytes);

		if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
			/* Reused conn: wait for first byte before trusting it. */
			if (VCP_Wait(wrk, pfd, VTIM_real() +
			    bo->htc->first_byte_timeout) != 0) {
				bo->htc->doclose = SC_RX_TIMEOUT;
				VSLb(bo->vsl, SLT_FetchError,
				    "first byte timeout (reused connection)");
				retry_connect = 0;
			}
		}

		if (bo->htc->doclose == SC_NULL) {
			assert(PFD_State(pfd) == PFD_STATE_USED);
			if (i == 0)
				i = V1F_FetchRespHdr(bo);
			if (i == 0) {
				AN(bo->htc->priv);
				http_VSL_log(bo->beresp);
				return (0);
			}
		}
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

		/*
		 * If we recycled a backend connection, there is a finite chance
		 * that the backend closed it before we got the bereq to it.
		 * In that case do a single automatic retry if req.body allows.
		 */
		vbe_dir_finish(ctx, d);
		AZ(bo->htc);
		if (i < 0 || retry_connect == 0)
			break;
		if (bo->no_retry != NULL)
			break;
		VSC_C_main->backend_retry++;
	} while (retry_connect--);
	return (-1);
}
| 480 |
|
|
| 481 |
|
static VCL_IP v_matchproto_(vdi_getip_f) |
| 482 |
8 |
vbe_dir_getip(VRT_CTX, VCL_BACKEND d) |
| 483 |
|
{ |
| 484 |
|
struct pfd *pfd; |
| 485 |
|
|
| 486 |
8 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
| 487 |
8 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
| 488 |
8 |
CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC); |
| 489 |
8 |
CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC); |
| 490 |
8 |
pfd = ctx->bo->htc->priv; |
| 491 |
|
|
| 492 |
8 |
return (VCP_GetIp(pfd)); |
| 493 |
|
} |
| 494 |
|
|
| 495 |
|
/*--------------------------------------------------------------------*/ |
| 496 |
|
|
| 497 |
|
/*
 * Handle a pipe transaction: connect to the backend, forward the
 * request, then shovel bytes in both directions until either side
 * closes or the pipe task deadline expires.  Returns the stream close
 * reason (SC_TX_ERROR when the connection could not even be set up).
 */
static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
	int i;
	stream_close_t retval;
	struct backend *bp;
	struct v1p_acct v1a;
	struct pfd *pfd;
	vtim_real deadline;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	memset(&v1a, 0, sizeof v1a);

	/* This is hackish... */
	/* Move the already-counted req header bytes into pipe accounting. */
	v1a.req = ctx->req->acct.req_hdrbytes;
	ctx->req->acct.req_hdrbytes = 0;

	ctx->req->res_pipe = 1;

	retval = SC_TX_ERROR;
	pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

	if (pfd != NULL) {
		CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
		i = V1F_SendReq(ctx->req->wrk, ctx->bo,
		    &v1a.bereq, &v1a.out);
		VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
		if (i == 0) {
			/* Per-task deadline; fall back to the global param. */
			deadline = ctx->bo->task_deadline;
			if (isnan(deadline))
				deadline = cache_param->pipe_task_deadline;
			if (deadline > 0.)
				deadline += ctx->req->sp->t_idle;
			retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
			    deadline);
		}
		VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
		ctx->bo->htc->doclose = retval;
		vbe_dir_finish(ctx, d);
	}
	/* Charge traffic to the backend VSC even on setup failure. */
	V1P_Charge(ctx->req, &v1a, bp->vsc);
	CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
	return (retval);
}
| 546 |
|
|
| 547 |
|
/*--------------------------------------------------------------------*/ |
| 548 |
|
|
| 549 |
|
/*
 * React to VCL lifecycle events for this backend director:
 *  - WARM: expose the VSC segment and start the health probe.
 *  - COLD: stop the probe, then hide the VSC segment.
 *  - DISCARD: tear the director down.
 *  - VDI_EVENT_SICK: if the backend is now (effectively) sick, wake
 *    all threads waiting for a connection slot so they fail fast.
 */
static void
vbe_dir_event(const struct director *d, enum vcl_event_e ev)
{
	struct backend *bp;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	if (ev == VCL_EVENT_WARM) {
		VRT_VSC_Reveal(bp->vsc_seg);
		if (bp->probe != NULL)
			VBP_Control(bp, 1);
	} else if (ev == VCL_EVENT_COLD) {
		if (bp->probe != NULL)
			VBP_Control(bp, 0);
		VRT_VSC_Hide(bp->vsc_seg);
	} else if (ev == VCL_EVENT_DISCARD) {
		VRT_DelDirector(&bp->director);
	} else if (ev == VDI_EVENT_SICK) {
		const struct vdi_ahealth *ah = d->vdir->admin_health;

		/* Sick by admin decree, or auto and the probe says sick. */
		if (ah == VDI_AH_SICK || (ah == VDI_AH_AUTO && bp->sick))
			vbe_connwait_broadcast(bp);
	}
}
| 574 |
|
|
| 575 |
|
/*---------------------------------------------------------------------*/ |
| 576 |
|
|
| 577 |
|
/*
 * Free a backend and everything it owns: probe, VSC segment, global
 * backend count, connection pool reference, all the string fields
 * duplicated at creation time (via the DA() expansion of
 * VRT_BACKEND_HANDLE), the endpoint and finally the struct itself.
 * The connwait queue must be empty by now.
 */
static void
vbe_free(struct backend *be)
{

	CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

	if (be->probe != NULL)
		VBP_Remove(be);

	VSC_vbe_Destroy(&be->vsc_seg);
	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend--;
	Lck_Unlock(&backends_mtx);
	VCP_Rel(&be->conn_pool);

	/* DA = duplicated (allocated) fields, DN = plain-value fields. */
#define DA(x)	do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)	/**/
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN
	free(be->endpoint);

	assert(VTAILQ_EMPTY(&be->cw_head));
	FREE_OBJ(be);
}
| 602 |
|
|
| 603 |
|
static void v_matchproto_(vdi_destroy_f) |
| 604 |
643 |
vbe_destroy(const struct director *d) |
| 605 |
|
{ |
| 606 |
|
struct backend *be; |
| 607 |
|
|
| 608 |
643 |
CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC); |
| 609 |
643 |
vbe_free(be); |
| 610 |
643 |
} |
| 611 |
|
|
| 612 |
|
/*--------------------------------------------------------------------*/ |
| 613 |
|
|
| 614 |
|
static void |
| 615 |
48 |
vbe_panic(const struct director *d, struct vsb *vsb) |
| 616 |
|
{ |
| 617 |
|
struct backend *bp; |
| 618 |
|
|
| 619 |
48 |
PAN_CheckMagic(vsb, d, DIRECTOR_MAGIC); |
| 620 |
48 |
bp = d->priv; |
| 621 |
48 |
PAN_CheckMagic(vsb, bp, BACKEND_MAGIC); |
| 622 |
|
|
| 623 |
48 |
VCP_Panic(vsb, bp->conn_pool); |
| 624 |
48 |
VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr); |
| 625 |
48 |
VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn); |
| 626 |
48 |
} |
| 627 |
|
|
| 628 |
|
/*-------------------------------------------------------------------- |
| 629 |
|
*/ |
| 630 |
|
|
| 631 |
|
static void v_matchproto_(vdi_list_f) |
| 632 |
9312 |
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag, |
| 633 |
|
int jflag) |
| 634 |
|
{ |
| 635 |
|
char buf[VTCP_ADDRBUFSIZE]; |
| 636 |
|
struct backend *bp; |
| 637 |
|
struct vrt_endpoint *vep; |
| 638 |
|
|
| 639 |
9312 |
(void)ctx; |
| 640 |
|
|
| 641 |
9312 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
| 642 |
9312 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
| 643 |
9312 |
CHECK_OBJ_NOTNULL(bp->endpoint, VRT_ENDPOINT_MAGIC); |
| 644 |
|
|
| 645 |
9312 |
vep = bp->endpoint; |
| 646 |
|
|
| 647 |
9312 |
if (bp->probe != NULL) |
| 648 |
624 |
VBP_Status(vsb, bp, pflag, jflag); |
| 649 |
8688 |
else if (jflag && pflag) |
| 650 |
24 |
VSB_cat(vsb, "{},\n"); |
| 651 |
8664 |
else if (jflag) |
| 652 |
88 |
VSB_cat(vsb, "[0, 0, \"healthy\"]"); |
| 653 |
8576 |
else if (pflag) |
| 654 |
72 |
return; |
| 655 |
|
else |
| 656 |
8504 |
VSB_cat(vsb, "0/0\thealthy"); |
| 657 |
|
|
| 658 |
9240 |
if (jflag && pflag) { |
| 659 |
40 |
if (vep->ipv4 != NULL) { |
| 660 |
40 |
VTCP_name(vep->ipv4, buf, sizeof buf, NULL, 0); |
| 661 |
40 |
VSB_printf(vsb, "\"ipv4\": \"%s\",\n", buf); |
| 662 |
40 |
} |
| 663 |
40 |
if (vep->ipv6 != NULL) { |
| 664 |
0 |
VTCP_name(vep->ipv6, buf, sizeof buf, NULL, 0); |
| 665 |
0 |
VSB_printf(vsb, "\"ipv6\": \"%s\",\n", buf); |
| 666 |
0 |
} |
| 667 |
40 |
} |
| 668 |
9312 |
} |
| 669 |
|
|
| 670 |
|
/*-------------------------------------------------------------------- |
| 671 |
|
*/ |
| 672 |
|
|
| 673 |
|
static VCL_BOOL v_matchproto_(vdi_healthy_f) |
| 674 |
536 |
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t) |
| 675 |
|
{ |
| 676 |
|
struct backend *bp; |
| 677 |
|
|
| 678 |
536 |
(void)ctx; |
| 679 |
536 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
| 680 |
536 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
| 681 |
|
|
| 682 |
536 |
if (t != NULL) |
| 683 |
336 |
*t = bp->changed; |
| 684 |
|
|
| 685 |
536 |
return (!bp->sick); |
| 686 |
|
} |
| 687 |
|
|
| 688 |
|
/*-------------------------------------------------------------------- |
| 689 |
|
*/ |
| 690 |
|
|
| 691 |
|
/* Director method table for backends that carry a health probe. */
static const struct vdi_methods vbe_methods[1] = {{
	.magic =		VDI_METHODS_MAGIC,
	.type =			"backend",
	.http1pipe =		vbe_dir_http1pipe,
	.gethdrs =		vbe_dir_gethdrs,
	.getip =		vbe_dir_getip,
	.finish =		vbe_dir_finish,
	.event =		vbe_dir_event,
	.destroy =		vbe_destroy,
	.panic =		vbe_panic,
	.list =			vbe_list,
	.healthy =		vbe_healthy
}};
| 704 |
|
|
| 705 |
|
/*
 * Same method table without .healthy: a probe-less backend is always
 * considered healthy by the generic director code.
 */
static const struct vdi_methods vbe_methods_noprobe[1] = {{
	.magic =		VDI_METHODS_MAGIC,
	.type =			"backend",
	.http1pipe =		vbe_dir_http1pipe,
	.gethdrs =		vbe_dir_gethdrs,
	.getip =		vbe_dir_getip,
	.finish =		vbe_dir_finish,
	.event =		vbe_dir_event,
	.destroy =		vbe_destroy,
	.panic =		vbe_panic,
	.list =			vbe_list
}};
| 717 |
|
|
| 718 |
|
/*-------------------------------------------------------------------- |
| 719 |
|
* Create a new static or dynamic director::backend instance. |
| 720 |
|
*/ |
| 721 |
|
|
| 722 |
|
size_t |
| 723 |
9936 |
VRT_backend_vsm_need(VRT_CTX) |
| 724 |
|
{ |
| 725 |
9936 |
(void)ctx; |
| 726 |
9936 |
return (VRT_VSC_Overhead(VSC_vbe_size)); |
| 727 |
|
} |
| 728 |
|
|
| 729 |
|
/* |
| 730 |
|
* The new_backend via parameter is a VCL_BACKEND, but we need a (struct |
| 731 |
|
* backend) |
| 732 |
|
* |
| 733 |
|
* For now, we resolve it when creating the backend, which implies no redundancy |
| 734 |
|
* / load balancing across the via director if it is more than a simple backend. |
| 735 |
|
*/ |
| 736 |
|
|
| 737 |
|
/*
 * Resolve the "via" director to a concrete (struct backend), failing
 * the VCL if that is not possible.  Via is only supported for IP
 * endpoints, and the resolved director must itself be a plain backend
 * (one of our two method tables).  Returns NULL after VRT_fail().
 *
 * Resolution happens once, at backend creation time, so a via director
 * that is more than a simple backend gives no redundancy/load-balancing.
 */
static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

	if (vep->uds_path) {
		VRT_fail(ctx, "Via is only supported for IP addresses");
		return (NULL);
	}

	via = VRT_DirectorResolve(ctx, via);

	if (via == NULL) {
		VRT_fail(ctx, "Via resolution failed");
		return (NULL);
	}

	CHECK_OBJ(via, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

	/* Only a plain backend director can serve as a via hop. */
	if (via->vdir->methods == vbe_methods ||
	    via->vdir->methods == vbe_methods_noprobe)
		CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

	if (viabe == NULL)
		VRT_fail(ctx, "Via does not resolve to a backend");

	return (viabe);
}
| 769 |
|
|
| 770 |
|
/* |
| 771 |
|
* construct a new endpoint identical to vep with sa in a proxy header |
| 772 |
|
*/ |
| 773 |
|
/*
 * construct a new endpoint identical to vep with sa in a proxy header
 *
 * The PROXYv2 preamble encodes sa as the server address and a bogo-IP
 * (matching sa's address family) as the client.  The preamble bytes are
 * embedded by the second VRT_Endpoint_Clone(), so the vsb and the
 * intermediate clone can be released before returning.  Caller owns the
 * returned endpoint.
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
	struct vsb *preamble;
	struct vrt_blob blob[1];
	struct vrt_endpoint *nvep, *ret;
	const struct suckaddr *client_bogo;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	AN(sa);

	nvep = VRT_Endpoint_Clone(vep);
	CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

	if (VSA_Get_Proto(sa) == AF_INET6)
		client_bogo = bogo_ip6;
	else
		client_bogo = bogo_ip;

	preamble = VSB_new_auto();
	AN(preamble);
	VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
	INIT_OBJ(blob, VRT_BLOB_MAGIC);
	blob->blob = VSB_data(preamble);
	blob->len = VSB_len(preamble);
	nvep->preamble = blob;
	/* Second clone deep-copies the preamble into the new endpoint. */
	ret = VRT_Endpoint_Clone(nvep);
	CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
	VSB_destroy(&preamble);
	FREE_OBJ(nvep);

	return (ret);
}
| 807 |
|
|
| 808 |
|
/*
 * Create a new VCL backend (director) inside VSM cluster `vc`.
 *
 * `vrt` describes the backend; `via` is an optional backend to proxy
 * through (NULL for a direct connection).  Returns the new director, or
 * NULL after VRT_fail() / allocation failure.
 */
VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
	struct backend *be;
	struct vcl *vcl;
	const struct vrt_backend_probe *vbp;
	const struct vrt_endpoint *vep;
	const struct vdi_methods *m;
	const struct suckaddr *sa = NULL;
	char abuf[VTCP_ADDRBUFSIZE];
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
	vep = vrt->endpoint;
	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	/* An endpoint is either a UDS path or at least one IP, never both */
	if (vep->uds_path == NULL) {
		if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
			VRT_fail(ctx, "%s: Illegal IP", __func__);
			return (NULL);
		}
	} else {
		assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
	}

	if (via != NULL) {
		/* via_resolve VRT_fail()s on its own when it returns NULL */
		viabe = via_resolve(ctx, vep, via);
		if (viabe == NULL)
			return (NULL);
	}

	vcl = ctx->vcl;
	AN(vcl);
	AN(vrt->vcl_name);

	/* Create new backend */
	ALLOC_OBJ(be, BACKEND_MAGIC);
	if (be == NULL)
		return (NULL);
	VTAILQ_INIT(&be->cw_head);

	/* Copy all attributes from vrt into be: strings are duplicated (DA),
	 * scalars assigned (DN); the field list comes from VRT_BACKEND_HANDLE */
#define DA(x)	do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)	do { be->x = vrt->x; } while (0)
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN

	/* Inherit unset (negative/NaN) timeouts from the via backend */
#define CPTMO(a, b, x) do {				\
		if ((a)->x < 0.0 || isnan((a)->x))	\
			(a)->x = (b)->x;		\
	} while(0)

	if (viabe != NULL) {
		CPTMO(be, viabe, connect_timeout);
		CPTMO(be, viabe, first_byte_timeout);
		CPTMO(be, viabe, between_bytes_timeout);
	}
#undef CPTMO

	/*
	 * Pick an address: needed for the via proxy preamble and/or to
	 * synthesize a default Host header when none was given.
	 */
	if (viabe || be->hosthdr == NULL) {
		if (vrt->endpoint->uds_path != NULL)
			sa = bogo_ip;
		else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
			sa = vep->ipv6;
		else if (vep->ipv4 != NULL)
			sa = vep->ipv4;
		else
			sa = vep->ipv6;
		if (be->hosthdr == NULL) {
			VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
			REPLACE(be->hosthdr, abuf);
		}
	}

	be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
	    "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
	AN(be->vsc);
	/* Counters of backends in cold VCLs stay hidden until warmup */
	if (! vcl->temp->is_warm)
		VRT_VSC_Hide(be->vsc_seg);

	/* Own the endpoint: via connections get a proxy-preamble clone */
	if (viabe)
		vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
		    be->authority);
	else
		vep = be->endpoint = VRT_Endpoint_Clone(vep);

	AN(vep);
	be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
	AN(be->conn_pool);

	vbp = vrt->probe;
	if (vbp == NULL)
		vbp = VCL_DefaultProbe(vcl);

	if (vbp != NULL) {
		VBP_Insert(be, vbp, be->conn_pool);
		m = vbe_methods;
	} else {
		/* No probe: backend is unconditionally healthy */
		be->sick = 0;
		be->vsc->happy = UINT64_MAX;
		m = vbe_methods_noprobe;
	}

	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend++;
	Lck_Unlock(&backends_mtx);

	be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

	if (be->director == NULL) {
		/* undoes everything above, including the n_backend count */
		vbe_free(be);
		return (NULL);
	}
	/* for cold VCL, update initial director state */
	if (be->probe != NULL)
		VBP_Update_Backend(be->probe);
	return (be->director);
}
| 927 |
|
|
| 928 |
|
VCL_BACKEND |
| 929 |
256 |
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via) |
| 930 |
|
{ |
| 931 |
|
|
| 932 |
256 |
CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC); |
| 933 |
256 |
CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC); |
| 934 |
256 |
return (VRT_new_backend_clustered(ctx, NULL, vrt, via)); |
| 935 |
|
} |
| 936 |
|
|
| 937 |
|
/*-------------------------------------------------------------------- |
| 938 |
|
* Delete a dynamic director::backend instance. Undeleted dynamic and |
| 939 |
|
* static instances are GC'ed when the VCL is discarded (in cache_vcl.c) |
| 940 |
|
*/ |
| 941 |
|
|
| 942 |
|
void |
| 943 |
627 |
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp) |
| 944 |
|
{ |
| 945 |
|
|
| 946 |
627 |
(void)ctx; |
| 947 |
627 |
CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC); |
| 948 |
627 |
VRT_DisableDirector(*dp); |
| 949 |
627 |
VRT_Assign_Backend(dp, NULL); |
| 950 |
627 |
} |
| 951 |
|
|
| 952 |
|
/*---------------------------------------------------------------------*/ |
| 953 |
|
|
| 954 |
|
/* One-time initialization of the backend subsystem's shared lock. */
void
VBE_InitCfg(void)
{

	Lck_New(&backends_mtx, lck_vbe);
}