varnish-cache/bin/varnishd/cache/cache_backend.c

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The director implementation for VCL backends.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_director.h"

#include "vtcp.h"
#include "vtim.h"
#include "vsa.h"

#include "cache_backend.h"
#include "cache_conn_pool.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "http1/cache_http1.h"
#include "proxy/cache_proxy.h"

#include "VSC_vbe.h"

/*--------------------------------------------------------------------*/

enum connwait_e {
        CW_DO_CONNECT = 1,
        CW_QUEUED,
        CW_DEQUEUED,
        CW_BE_BUSY,
};

struct connwait {
        unsigned                        magic;
#define CONNWAIT_MAGIC                  0x75c7a52b
        enum connwait_e                 cw_state;
        VTAILQ_ENTRY(connwait)          cw_list;
        pthread_cond_t                  cw_cond;
};

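/*
 * Wait-queue state, used by vbe_dir_getfd() below: when a backend has
 * reached max_connections, a fetch task can be parked on bp->cw_head as a
 * struct connwait and wait on cw_cond, subject to the backend_wait_limit
 * and backend_wait_timeout settings.  cw_state records where the task is
 * in that protocol: CW_DO_CONNECT (connect right away), CW_QUEUED (parked
 * on the list), CW_DEQUEUED (taken off the list again) and CW_BE_BUSY
 * (give up and report the backend as busy).
 */
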
static const char * const vbe_proto_ident = "HTTP Backend";

static struct lock backends_mtx;

/*--------------------------------------------------------------------*/

void
VBE_Connect_Error(struct VSC_vbe *vsc, int err)
{

        switch(err) {
        case 0:
                /*
                 * This is kind of brittle, but zero is the only
                 * value of errno we can trust to have no meaning.
                 */
                vsc->helddown++;
                break;
        case EACCES:
        case EPERM:
                vsc->fail_eacces++;
                break;
        case EADDRNOTAVAIL:
                vsc->fail_eaddrnotavail++;
                break;
        case ECONNREFUSED:
                vsc->fail_econnrefused++;
                break;
        case ENETUNREACH:
                vsc->fail_enetunreach++;
                break;
        case ETIMEDOUT:
                vsc->fail_etimedout++;
                break;
        default:
                vsc->fail_other++;
        }
}

/*--------------------------------------------------------------------*/

#define FIND_TMO(tmx, dst, bo, be)                                      \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);                   \
                dst = bo->tmx;                                          \
                if (isnan(dst) && be->tmx >= 0.0)                       \
                        dst = be->tmx;                                  \
                if (isnan(dst))                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_SPEC(tmx, dst, be, def)                                 \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);                   \
                dst = be->tmx;                                          \
                if (dst == def)                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_PARAM(tmx, dst, be)                                     \
        FIND_BE_SPEC(tmx, dst, be, 0)

#define FIND_BE_TMO(tmx, dst, be)                                       \
        FIND_BE_SPEC(tmx, dst, be, -1.0)

#define BE_BUSY(be)     \
        (be->max_connections > 0 && be->n_conn >= be->max_connections)

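/*
 * Timeout resolution: FIND_TMO prefers a value set on the busyobj (bereq),
 * then a backend attribute >= 0.0, and finally the global parameter in
 * cache_param.  FIND_BE_PARAM and FIND_BE_TMO do the same for backend-only
 * settings, falling back to cache_param while the backend holds the
 * "unset" value (0 resp. -1.0).
 */
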
/*--------------------------------------------------------------------*/

static void
vbe_connwait_broadcast(const struct backend *bp)
{
        struct connwait *cw;

        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

        Lck_Lock(bp->director->mtx);
        VTAILQ_FOREACH(cw, &bp->cw_head, cw_list) {
                CHECK_OBJ(cw, CONNWAIT_MAGIC);
                assert(cw->cw_state == CW_QUEUED);
                PTOK(pthread_cond_signal(&cw->cw_cond));
        }
        Lck_Unlock(bp->director->mtx);
}

static void
vbe_connwait_signal_locked(const struct backend *bp)
{
        struct connwait *cw;

        Lck_AssertHeld(bp->director->mtx);

        if (bp->n_conn < bp->max_connections) {
                cw = VTAILQ_FIRST(&bp->cw_head);
                if (cw != NULL) {
                        CHECK_OBJ(cw, CONNWAIT_MAGIC);
                        assert(cw->cw_state == CW_QUEUED);
                        PTOK(pthread_cond_signal(&cw->cw_cond));
                }
        }
}

static void
vbe_connwait_fini(struct connwait *cw)
{
        CHECK_OBJ_NOTNULL(cw, CONNWAIT_MAGIC);
        assert(cw->cw_state != CW_QUEUED);
        PTOK(pthread_cond_destroy(&cw->cw_cond));
        FINI_OBJ(cw);
}

/*--------------------------------------------------------------------
 * Get a connection to the backend
 *
 * note: wrk is a separate argument because it differs for pipe vs. fetch
 */

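/*
 * Every failure path below returns NULL and accounts for the reason: an
 * unhealthy backend (backend_unhealthy), a busy backend (backend_busy),
 * no workspace left for bo->htc, or a failed connect / PROXY preamble
 * write, both counted as backend_fail (the connect errno is further
 * classified by VBE_Connect_Error()).
 */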
static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
        struct busyobj *bo;
        struct pfd *pfd;
        int *fdp, err;
        vtim_dur tmod;
        char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
        char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
        unsigned wait_limit;
        vtim_dur wait_tmod;
        vtim_dur wait_end;
        struct connwait cw[1];

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
        AN(bp->vsc);

        if (!VRT_Healthy(ctx, dir, NULL)) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: unhealthy", VRT_BACKEND_string(dir));
                bp->vsc->unhealthy++;
                VSC_C_main->backend_unhealthy++;
                return (NULL);
        }
        INIT_OBJ(cw, CONNWAIT_MAGIC);
        PTOK(pthread_cond_init(&cw->cw_cond, NULL));
        Lck_Lock(bp->director->mtx);
        FIND_BE_PARAM(backend_wait_limit, wait_limit, bp);
        FIND_BE_TMO(backend_wait_timeout, wait_tmod, bp);
        cw->cw_state = CW_DO_CONNECT;
        if (!VTAILQ_EMPTY(&bp->cw_head) || BE_BUSY(bp))
                cw->cw_state = CW_BE_BUSY;

        if (cw->cw_state == CW_BE_BUSY && wait_limit > 0 &&
            wait_tmod > 0.0 && bp->cw_count < wait_limit) {
                VTAILQ_INSERT_TAIL(&bp->cw_head, cw, cw_list);
                bp->cw_count++;
                VSC_C_main->backend_wait++;
                cw->cw_state = CW_QUEUED;
                wait_end = VTIM_real() + wait_tmod;
                do {
                        err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
                            wait_end);
                } while (err == EINTR);
                assert(cw->cw_state == CW_QUEUED);
                VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
                cw->cw_state = CW_DEQUEUED;
                bp->cw_count--;
                if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
                        VSC_C_main->backend_wait_fail++;
                        cw->cw_state = CW_BE_BUSY;
                }
        }
        if (cw->cw_state != CW_BE_BUSY)
                bp->n_conn++;

        if (!VTAILQ_EMPTY(&bp->cw_head) && !BE_BUSY(bp)) {
                /* Signal the new head of the waiting queue */
                vbe_connwait_signal_locked(bp);
        }

        Lck_Unlock(bp->director->mtx);

        if (cw->cw_state == CW_BE_BUSY) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: busy", VRT_BACKEND_string(dir));
                bp->vsc->busy++;
                VSC_C_main->backend_busy++;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        AZ(bo->htc);
        bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
        /* XXX: we may want to detect the ws overflow sooner */
        if (bo->htc == NULL) {
                VSLb(bo->vsl, SLT_FetchError, "out of workspace");
                /* XXX: counter ? */
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->htc->doclose = SC_NULL;
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        FIND_TMO(connect_timeout, tmod, bo, bp);
        pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
        if (pfd == NULL) {
                Lck_Lock(bp->director->mtx);
                VBE_Connect_Error(bp->vsc, err);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: fail errno %d (%s)",
                     VRT_BACKEND_string(dir), err, VAS_errtxt(err));
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
        fdp = PFD_Fd(pfd);
        AN(fdp);
        assert(*fdp >= 0);

        Lck_Lock(bp->director->mtx);
        bp->vsc->conn++;
        bp->vsc->req++;
        Lck_Unlock(bp->director->mtx);

        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        err = 0;
        if (bp->proxy_header != 0)
                err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
        if (err < 0) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: proxy write errno %d (%s)",
                     VRT_BACKEND_string(dir),
                     errno, VAS_errtxt(errno));
                // account as if connect failed - good idea?
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                bp->vsc->conn--;
                bp->vsc->req--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->acct.bereq_hdrbytes += err;

        PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
        PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
        VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s %s",
            *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
            PFD_State(pfd) == PFD_STATE_STOLEN ? "reuse" : "connect");

        INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
        bo->htc->priv = pfd;
        bo->htc->rfd = fdp;
        bo->htc->doclose = SC_NULL;
        FIND_TMO(first_byte_timeout,
            bo->htc->first_byte_timeout, bo, bp);
        FIND_TMO(between_bytes_timeout,
            bo->htc->between_bytes_timeout, bo, bp);
        vbe_connwait_fini(cw);
        return (pfd);
}

static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
        struct backend *bp;
        struct busyobj *bo;
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        pfd = bo->htc->priv;
        bo->htc->priv = NULL;
        if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
                VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d), bo->htc->doclose->name);
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
        } else {
                assert (PFD_State(pfd) == PFD_STATE_USED);
                VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d));
                Lck_Lock(bp->director->mtx);
                VSC_C_main->backend_recycle++;
                VCP_Recycle(bo->wrk, &pfd);
        }
        assert(bp->n_conn > 0);
        bp->n_conn--;
        AN(bp->vsc);
        bp->vsc->conn--;
#define ACCT(foo)       bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
        vbe_connwait_signal_locked(bp);
        Lck_Unlock(bp->director->mtx);
        bo->htc = NULL;
}

static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
        int i, extrachance = 1;
        struct backend *bp;
        struct pfd *pfd;
        struct busyobj *bo;
        struct worker *wrk;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
        if (bo->htc != NULL)
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
        wrk = ctx->bo->wrk;
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        /*
         * Now that we know our backend, we can set a default Host:
         * header if one is necessary.  This cannot be done in the VCL
         * because the backend may be chosen by a director.
         */
        if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
                http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

        do {
                if (bo->htc != NULL)
                        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
                if (pfd == NULL)
                        return (-1);
                AN(bo->htc);
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                if (PFD_State(pfd) != PFD_STATE_STOLEN)
                        extrachance = 0;

                i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
                    &bo->acct.bereq_bodybytes);

                if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
                        if (VCP_Wait(wrk, pfd, VTIM_real() +
                            bo->htc->first_byte_timeout) != 0) {
                                bo->htc->doclose = SC_RX_TIMEOUT;
                                VSLb(bo->vsl, SLT_FetchError,
                                     "first byte timeout (reused connection)");
                                extrachance = 0;
                        }
                }

                if (bo->htc->doclose == SC_NULL) {
                        assert(PFD_State(pfd) == PFD_STATE_USED);
                        if (i == 0)
                                i = V1F_FetchRespHdr(bo);
                        if (i == 0) {
                                AN(bo->htc->priv);
                                http_VSL_log(bo->beresp);
                                return (0);
                        }
                }
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

                /*
                 * If we recycled a backend connection, there is a finite chance
                 * that the backend closed it before we got the bereq to it.
                 * In that case do a single automatic retry if req.body allows.
                 */
                vbe_dir_finish(ctx, d);
                AZ(bo->htc);
                if (i < 0 || extrachance == 0)
                        break;
                if (bo->no_retry != NULL)
                        break;
                VSC_C_main->backend_retry++;
        } while (extrachance--);
        return (-1);
}

static VCL_IP v_matchproto_(vdi_getip_f)
vbe_dir_getip(VRT_CTX, VCL_BACKEND d)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
        pfd = ctx->bo->htc->priv;

        return (VCP_GetIp(pfd));
}

/*--------------------------------------------------------------------*/

static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
        int i;
        stream_close_t retval;
        struct backend *bp;
        struct v1p_acct v1a;
        struct pfd *pfd;
        vtim_real deadline;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        memset(&v1a, 0, sizeof v1a);

        /* This is hackish... */
        v1a.req = ctx->req->acct.req_hdrbytes;
        ctx->req->acct.req_hdrbytes = 0;

        ctx->req->res_pipe = 1;

        retval = SC_TX_ERROR;
        pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

        if (pfd != NULL) {
                CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
                i = V1F_SendReq(ctx->req->wrk, ctx->bo,
                    &v1a.bereq, &v1a.out);
                VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
                if (i == 0) {
                        deadline = ctx->bo->task_deadline;
                        if (isnan(deadline))
                                deadline = cache_param->pipe_task_deadline;
                        if (deadline > 0.)
                                deadline += ctx->req->sp->t_idle;
                        retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
                            deadline);
                }
                VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
                ctx->bo->htc->doclose = retval;
                vbe_dir_finish(ctx, d);
        }
        V1P_Charge(ctx->req, &v1a, bp->vsc);
        CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
        return (retval);
}

/*--------------------------------------------------------------------*/

static void
vbe_dir_event(const struct director *d, enum vcl_event_e ev)
{
        struct backend *bp;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (ev == VCL_EVENT_WARM) {
                VRT_VSC_Reveal(bp->vsc_seg);
                if (bp->probe != NULL)
                        VBP_Control(bp, 1);
        } else if (ev == VCL_EVENT_COLD) {
                if (bp->probe != NULL)
                        VBP_Control(bp, 0);
                VRT_VSC_Hide(bp->vsc_seg);
        } else if (ev == VCL_EVENT_DISCARD) {
                VRT_DelDirector(&bp->director);
        } else if (ev == VDI_EVENT_SICK) {
                const struct vdi_ahealth *ah = d->vdir->admin_health;

                if (ah == VDI_AH_SICK || (ah == VDI_AH_AUTO && bp->sick))
                        vbe_connwait_broadcast(bp);
        }
}

/*---------------------------------------------------------------------*/

static void
vbe_free(struct backend *be)
{

        CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

        if (be->probe != NULL)
                VBP_Remove(be);

        VSC_vbe_Destroy(&be->vsc_seg);
        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend--;
        Lck_Unlock(&backends_mtx);
        VCP_Rel(&be->conn_pool);

#define DA(x)   do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)   /**/
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN
        free(be->endpoint);

        assert(VTAILQ_EMPTY(&be->cw_head));
        FREE_OBJ(be);
}

static void v_matchproto_(vdi_destroy_f)
vbe_destroy(const struct director *d)
{
        struct backend *be;

        CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC);
        vbe_free(be);
}

/*--------------------------------------------------------------------*/

static void
vbe_panic(const struct director *d, struct vsb *vsb)
{
        struct backend *bp;

        PAN_CheckMagic(vsb, d, DIRECTOR_MAGIC);
        bp = d->priv;
        PAN_CheckMagic(vsb, bp, BACKEND_MAGIC);

        VCP_Panic(vsb, bp->conn_pool);
        VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr);
        VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn);
}

/*--------------------------------------------------------------------
 */

static void v_matchproto_(vdi_list_f)
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag,
    int jflag)
{
        char buf[VTCP_ADDRBUFSIZE];
        struct backend *bp;
        struct vrt_endpoint *vep;

        (void)ctx;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(bp->endpoint, VRT_ENDPOINT_MAGIC);

        vep = bp->endpoint;

        if (bp->probe != NULL)
                VBP_Status(vsb, bp, pflag, jflag);
        else if (jflag && pflag)
                VSB_cat(vsb, "{},\n");
        else if (jflag)
                VSB_cat(vsb, "[0, 0, \"healthy\"]");
        else if (pflag)
                return;
        else
                VSB_cat(vsb, "0/0\thealthy");

        if (jflag && pflag) {
                if (vep->ipv4 != NULL) {
                        VTCP_name(vep->ipv4, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv4\": \"%s\",\n", buf);
                }
                if (vep->ipv6 != NULL) {
                        VTCP_name(vep->ipv6, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv6\": \"%s\",\n", buf);
                }
        }
}

/*--------------------------------------------------------------------
 */

static VCL_BOOL v_matchproto_(vdi_healthy_f)
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t)
{
        struct backend *bp;

        (void)ctx;
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (t != NULL)
                *t = bp->changed;

        return (!bp->sick);
}

/*--------------------------------------------------------------------
 */

static const struct vdi_methods vbe_methods[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list,
        .healthy =              vbe_healthy
}};

static const struct vdi_methods vbe_methods_noprobe[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list
}};
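
/*
 * The only difference between the two method tables is the .healthy
 * callback: a backend created without a probe is marked permanently
 * happy below (sick = 0, happy = UINT64_MAX) and does not install
 * vbe_healthy.
 */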

/*--------------------------------------------------------------------
 * Create a new static or dynamic director::backend instance.
 */

size_t
VRT_backend_vsm_need(VRT_CTX)
{
        (void)ctx;
        return (VRT_VSC_Overhead(VSC_vbe_size));
}

/*
 * The new_backend via parameter is a VCL_BACKEND, but we need a (struct
 * backend)
 *
 * For now, we resolve it when creating the backend, which implies no redundancy
 * / load balancing across the via director if it is more than a simple backend.
 */
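/*
 * Concretely, via_resolve() below turns the via director into its
 * underlying struct backend, and via_endpoint() clones that backend's
 * endpoint with a PROXY version 2 preamble carrying this backend's own
 * address (and authority, if set), so the connection is made to the via
 * backend while naming the intended destination.
 */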

static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

        if (vep->uds_path) {
                VRT_fail(ctx, "Via is only supported for IP addresses");
                return (NULL);
        }

        via = VRT_DirectorResolve(ctx, via);

        if (via == NULL) {
                VRT_fail(ctx, "Via resolution failed");
                return (NULL);
        }

        CHECK_OBJ(via, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

        if (via->vdir->methods == vbe_methods ||
            via->vdir->methods == vbe_methods_noprobe)
                CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

        if (viabe == NULL)
                VRT_fail(ctx, "Via does not resolve to a backend");

        return (viabe);
}

/*
 * construct a new endpoint identical to vep with sa in a proxy header
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
        struct vsb *preamble;
        struct vrt_blob blob[1];
        struct vrt_endpoint *nvep, *ret;
        const struct suckaddr *client_bogo;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(sa);

        nvep = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

        if (VSA_Get_Proto(sa) == AF_INET6)
                client_bogo = bogo_ip6;
        else
                client_bogo = bogo_ip;

        preamble = VSB_new_auto();
        AN(preamble);
        VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
        INIT_OBJ(blob, VRT_BLOB_MAGIC);
        blob->blob = VSB_data(preamble);
        blob->len = VSB_len(preamble);
        nvep->preamble = blob;
        ret = VRT_Endpoint_Clone(nvep);
        CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
        VSB_destroy(&preamble);
        FREE_OBJ(nvep);

        return (ret);
}

VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
        struct backend *be;
        struct vcl *vcl;
        const struct vrt_backend_probe *vbp;
        const struct vrt_endpoint *vep;
        const struct vdi_methods *m;
        const struct suckaddr *sa = NULL;
        char abuf[VTCP_ADDRBUFSIZE];
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        vep = vrt->endpoint;
        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        if (vep->uds_path == NULL) {
                if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
                        VRT_fail(ctx, "%s: Illegal IP", __func__);
                        return (NULL);
                }
        } else {
                assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
        }

        if (via != NULL) {
                viabe = via_resolve(ctx, vep, via);
                if (viabe == NULL)
                        return (NULL);
        }

        vcl = ctx->vcl;
        AN(vcl);
        AN(vrt->vcl_name);

        /* Create new backend */
        ALLOC_OBJ(be, BACKEND_MAGIC);
        if (be == NULL)
                return (NULL);
        VTAILQ_INIT(&be->cw_head);

#define DA(x)   do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)   do { be->x = vrt->x; } while (0)
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN

#define CPTMO(a, b, x) do {                             \
                if ((a)->x < 0.0 || isnan((a)->x))      \
                        (a)->x = (b)->x;                \
        } while(0)

        if (viabe != NULL) {
                CPTMO(be, viabe, connect_timeout);
                CPTMO(be, viabe, first_byte_timeout);
                CPTMO(be, viabe, between_bytes_timeout);
        }
#undef CPTMO

        if (viabe || be->hosthdr == NULL) {
                if (vrt->endpoint->uds_path != NULL)
                        sa = bogo_ip;
                else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
                        sa = vep->ipv6;
                else if (vep->ipv4 != NULL)
                        sa = vep->ipv4;
                else
                        sa = vep->ipv6;
                if (be->hosthdr == NULL) {
                        VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
                        REPLACE(be->hosthdr, abuf);
                }
        }

        be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
            "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
        AN(be->vsc);
        if (! vcl->temp->is_warm)
                VRT_VSC_Hide(be->vsc_seg);

        if (viabe)
                vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
                    be->authority);
        else
                vep = be->endpoint = VRT_Endpoint_Clone(vep);

        AN(vep);
        be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
        AN(be->conn_pool);

        vbp = vrt->probe;
        if (vbp == NULL)
                vbp = VCL_DefaultProbe(vcl);

        if (vbp != NULL) {
                VBP_Insert(be, vbp, be->conn_pool);
                m = vbe_methods;
        } else {
                be->sick = 0;
                be->vsc->happy = UINT64_MAX;
                m = vbe_methods_noprobe;
        }

        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend++;
        Lck_Unlock(&backends_mtx);

        be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

        if (be->director == NULL) {
                vbe_free(be);
                return (NULL);
        }
        /* for cold VCL, update initial director state */
        if (be->probe != NULL)
                VBP_Update_Backend(be->probe);
        return (be->director);
}

VCL_BACKEND
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via)
{

        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC);
        return (VRT_new_backend_clustered(ctx, NULL, vrt, via));
}
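
/*
 * Illustrative sketch of a caller (e.g. a VMOD creating a dynamic
 * backend); the name, host header and address below are hypothetical,
 * only fields this file itself uses are shown:
 *
 *	struct vrt_endpoint vep[1];
 *	struct vrt_backend vrt[1];
 *	VCL_BACKEND b;
 *
 *	INIT_OBJ(vep, VRT_ENDPOINT_MAGIC);
 *	vep->ipv4 = some_suckaddr;		// resolved elsewhere
 *	INIT_OBJ(vrt, VRT_BACKEND_MAGIC);
 *	vrt->endpoint = vep;
 *	vrt->vcl_name = "dyn_backend";
 *	vrt->hosthdr = "backend.example.com";
 *	b = VRT_new_backend(ctx, vrt, NULL);	// via == NULL: connect directly
 *	// NULL means creation failed; otherwise release the reference
 *	// later with VRT_delete_backend(ctx, &b);
 */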

/*--------------------------------------------------------------------
 * Delete a dynamic director::backend instance.  Undeleted dynamic and
 * static instances are GC'ed when the VCL is discarded (in cache_vcl.c)
 */

void
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp)
{

        (void)ctx;
        CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC);
        VRT_DisableDirector(*dp);
        VRT_Assign_Backend(dp, NULL);
}

/*---------------------------------------------------------------------*/

void
VBE_InitCfg(void)
{

        Lck_New(&backends_mtx, lck_vbe);
}