varnish-cache/bin/varnishd/cache/cache_backend.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The director implementation for VCL backends.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_director.h"

#include "vtcp.h"
#include "vtim.h"
#include "vsa.h"

#include "cache_backend.h"
#include "cache_conn_pool.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "http1/cache_http1.h"
#include "proxy/cache_proxy.h"

#include "VSC_vbe.h"

/*--------------------------------------------------------------------*/
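
/*
 * Connection-slot waiting: when a backend has reached max_connections,
 * tasks needing a connection can queue on the backend's cw_head list
 * (bounded by the backend_wait_limit parameter) and wait up to
 * backend_wait_timeout for a slot to be signalled.  cw_state tracks how
 * far a waiter got in that process.
 */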

enum connwait_e {
        CW_DO_CONNECT = 1,
        CW_QUEUED,
        CW_DEQUEUED,
        CW_BE_BUSY,
};

struct connwait {
        unsigned                        magic;
#define CONNWAIT_MAGIC                  0x75c7a52b
        enum connwait_e                 cw_state;
        VTAILQ_ENTRY(connwait)          cw_list;
        pthread_cond_t                  cw_cond;
};

static const char * const vbe_proto_ident = "HTTP Backend";

static struct lock backends_mtx;

/*--------------------------------------------------------------------*/

void
VBE_Connect_Error(struct VSC_vbe *vsc, int err)
{

        switch (err) {
        case 0:
                /*
                 * This is kind of brittle, but zero is the only
                 * value of errno we can trust to have no meaning.
                 */
                vsc->helddown++;
                break;
        case EACCES:
        case EPERM:
                vsc->fail_eacces++;
                break;
        case EADDRNOTAVAIL:
                vsc->fail_eaddrnotavail++;
                break;
        case ECONNREFUSED:
                vsc->fail_econnrefused++;
                break;
        case ENETUNREACH:
                vsc->fail_enetunreach++;
                break;
        case ETIMEDOUT:
                vsc->fail_etimedout++;
                break;
        default:
                vsc->fail_other++;
        }
}

/*--------------------------------------------------------------------*/
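
/*
 * Timeout/parameter resolution, most specific first: the busyobj value
 * (per-transaction), then the backend's own setting when it is valid,
 * and finally the global cache_param default.  FIND_BE_PARAM and
 * FIND_BE_TMO look up backend-level settings, where 0 respectively -1.0
 * mean "not set on this backend".
 */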

#define FIND_TMO(tmx, dst, bo, be)                                      \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);                   \
                dst = bo->tmx;                                          \
                if (isnan(dst) && be->tmx >= 0.0)                       \
                        dst = be->tmx;                                  \
                if (isnan(dst))                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_SPEC(tmx, dst, be, def)                                 \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);                   \
                dst = be->tmx;                                          \
                if (dst == def)                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_PARAM(tmx, dst, be)                                     \
        FIND_BE_SPEC(tmx, dst, be, 0)

#define FIND_BE_TMO(tmx, dst, be)                                       \
        FIND_BE_SPEC(tmx, dst, be, -1.0)

#define BE_BUSY(be)     \
        (be->max_connections > 0 && be->n_conn >= be->max_connections)

/*--------------------------------------------------------------------*/
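
/*
 * Wake every queued waiter.  Reached through the VDI_EVENT_SICK event
 * when the backend is marked sick (administratively, or by its probe
 * under "auto" admin health), so that waiting tasks can re-check the
 * backend's health and give up instead of sitting out the full wait
 * timeout.
 */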

static void
vbe_connwait_broadcast(const struct backend *bp)
{
        struct connwait *cw;

        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

        Lck_Lock(bp->director->mtx);
        VTAILQ_FOREACH(cw, &bp->cw_head, cw_list) {
                CHECK_OBJ(cw, CONNWAIT_MAGIC);
                assert(cw->cw_state == CW_QUEUED);
                PTOK(pthread_cond_signal(&cw->cw_cond));
        }
        Lck_Unlock(bp->director->mtx);
}

static void
vbe_connwait_signal_locked(const struct backend *bp)
{
        struct connwait *cw;

        Lck_AssertHeld(bp->director->mtx);

        if (bp->n_conn < bp->max_connections) {
                cw = VTAILQ_FIRST(&bp->cw_head);
                if (cw != NULL) {
                        CHECK_OBJ(cw, CONNWAIT_MAGIC);
                        assert(cw->cw_state == CW_QUEUED);
                        PTOK(pthread_cond_signal(&cw->cw_cond));
                }
        }
}

static void
vbe_connwait_fini(struct connwait *cw)
{
        CHECK_OBJ_NOTNULL(cw, CONNWAIT_MAGIC);
        assert(cw->cw_state != CW_QUEUED);
        PTOK(pthread_cond_destroy(&cw->cw_cond));
        FINI_OBJ(cw);
}

/*--------------------------------------------------------------------
 * Get a connection to the backend
 *
 * note: wrk is a separate argument because it differs for pipe vs. fetch
 */

static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
        struct busyobj *bo;
        struct pfd *pfd;
        int *fdp, err;
        vtim_dur tmod;
        char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
        char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
        unsigned wait_limit;
        vtim_dur wait_tmod;
        vtim_dur wait_end;
        struct connwait cw[1];

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
        AN(bp->vsc);

        if (!VRT_Healthy(ctx, dir, NULL)) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: unhealthy", VRT_BACKEND_string(dir));
                bp->vsc->unhealthy++;
                VSC_C_main->backend_unhealthy++;
                return (NULL);
        }
        INIT_OBJ(cw, CONNWAIT_MAGIC);
        PTOK(pthread_cond_init(&cw->cw_cond, NULL));
        Lck_Lock(bp->director->mtx);
        FIND_BE_PARAM(backend_wait_limit, wait_limit, bp);
        FIND_BE_TMO(backend_wait_timeout, wait_tmod, bp);
        cw->cw_state = CW_DO_CONNECT;
        if (!VTAILQ_EMPTY(&bp->cw_head) || BE_BUSY(bp))
                cw->cw_state = CW_BE_BUSY;
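
        /*
         * The backend is saturated (or other tasks are already queued).
         * If waiting is enabled and there is room in the queue, wait for
         * a connection slot to be signalled or for the wait timeout to
         * expire; on timeout, or if the backend went unhealthy meanwhile,
         * give up.
         */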

        if (cw->cw_state == CW_BE_BUSY && wait_limit > 0 &&
            wait_tmod > 0.0 && bp->cw_count < wait_limit) {
                VTAILQ_INSERT_TAIL(&bp->cw_head, cw, cw_list);
                bp->cw_count++;
                VSC_C_main->backend_wait++;
                cw->cw_state = CW_QUEUED;
                wait_end = VTIM_real() + wait_tmod;
                do {
                        err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
                            wait_end);
                } while (err == EINTR);
                assert(cw->cw_state == CW_QUEUED);
                VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
                cw->cw_state = CW_DEQUEUED;
                bp->cw_count--;
                if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
                        VSC_C_main->backend_wait_fail++;
                        cw->cw_state = CW_BE_BUSY;
                }
        }
        if (cw->cw_state != CW_BE_BUSY)
                bp->n_conn++;

        if (!VTAILQ_EMPTY(&bp->cw_head) && !BE_BUSY(bp)) {
                /* Signal the new head of the waiting queue */
                vbe_connwait_signal_locked(bp);
        }

        Lck_Unlock(bp->director->mtx);

        if (cw->cw_state == CW_BE_BUSY) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: busy", VRT_BACKEND_string(dir));
                bp->vsc->busy++;
                VSC_C_main->backend_busy++;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        AZ(bo->htc);
        bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
        /* XXX: we may want to detect the ws overflow sooner */
        if (bo->htc == NULL) {
                VSLb(bo->vsl, SLT_FetchError, "out of workspace");
                /* XXX: counter ? */
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->htc->doclose = SC_NULL;
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        FIND_TMO(connect_timeout, tmod, bo, bp);
        pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
        if (pfd == NULL) {
                Lck_Lock(bp->director->mtx);
                VBE_Connect_Error(bp->vsc, err);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: fail errno %d (%s)",
                     VRT_BACKEND_string(dir), err, VAS_errtxt(err));
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
        fdp = PFD_Fd(pfd);
        AN(fdp);
        assert(*fdp >= 0);

        Lck_Lock(bp->director->mtx);
        bp->vsc->conn++;
        bp->vsc->req++;
        Lck_Unlock(bp->director->mtx);

        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        err = 0;
        if (bp->proxy_header != 0)
                err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
        if (err < 0) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: proxy write errno %d (%s)",
                     VRT_BACKEND_string(dir),
                     errno, VAS_errtxt(errno));
                // account as if connect failed - good idea?
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                bp->vsc->conn--;
                bp->vsc->req--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->acct.bereq_hdrbytes += err;

        PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
        PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
        VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s %s",
            *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
            PFD_State(pfd) == PFD_STATE_STOLEN ? "reuse" : "connect");

        INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
        bo->htc->priv = pfd;
        bo->htc->rfd = fdp;
        bo->htc->doclose = SC_NULL;
        FIND_TMO(first_byte_timeout,
            bo->htc->first_byte_timeout, bo, bp);
        FIND_TMO(between_bytes_timeout,
            bo->htc->between_bytes_timeout, bo, bp);
        vbe_connwait_fini(cw);
        return (pfd);
}

static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
        struct backend *bp;
        struct busyobj *bo;
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        pfd = bo->htc->priv;
        bo->htc->priv = NULL;
        if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
                VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d), bo->htc->doclose->name);
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
        } else {
                assert (PFD_State(pfd) == PFD_STATE_USED);
                VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d));
                Lck_Lock(bp->director->mtx);
                VSC_C_main->backend_recycle++;
                VCP_Recycle(bo->wrk, &pfd);
        }
        assert(bp->n_conn > 0);
        bp->n_conn--;
        AN(bp->vsc);
        bp->vsc->conn--;
#define ACCT(foo)       bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
        vbe_connwait_signal_locked(bp);
        Lck_Unlock(bp->director->mtx);
        bo->htc = NULL;
}

static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
        int i, extrachance = 1;
        struct backend *bp;
        struct pfd *pfd;
        struct busyobj *bo;
        struct worker *wrk;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
        if (bo->htc != NULL)
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
        wrk = ctx->bo->wrk;
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        /*
         * Now that we know our backend, we can set a default Host:
         * header if one is necessary.  This cannot be done in the VCL
         * because the backend may be chosen by a director.
         */
        if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
                http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);
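
        /*
         * Get a connection and send the request.  extrachance allows one
         * automatic retry, but only when a recycled (reused) connection
         * turns out to have been closed by the backend; a freshly opened
         * connection gets no second chance.
         */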

        do {
                if (bo->htc != NULL)
                        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
                if (pfd == NULL)
                        return (-1);
                AN(bo->htc);
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                if (PFD_State(pfd) != PFD_STATE_STOLEN)
                        extrachance = 0;

                i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
                    &bo->acct.bereq_bodybytes);

                if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
                        if (VCP_Wait(wrk, pfd, VTIM_real() +
                            bo->htc->first_byte_timeout) != 0) {
                                bo->htc->doclose = SC_RX_TIMEOUT;
                                VSLb(bo->vsl, SLT_FetchError,
                                     "first byte timeout (reused connection)");
                                extrachance = 0;
                        }
                }

                if (bo->htc->doclose == SC_NULL) {
                        assert(PFD_State(pfd) == PFD_STATE_USED);
                        if (i == 0)
                                i = V1F_FetchRespHdr(bo);
                        if (i == 0) {
                                AN(bo->htc->priv);
                                http_VSL_log(bo->beresp);
                                return (0);
                        }
                }
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

                /*
                 * If we recycled a backend connection, there is a finite chance
                 * that the backend closed it before we got the bereq to it.
                 * In that case do a single automatic retry if req.body allows.
                 */
                vbe_dir_finish(ctx, d);
                AZ(bo->htc);
                if (i < 0 || extrachance == 0)
                        break;
                if (bo->no_retry != NULL)
                        break;
                VSC_C_main->backend_retry++;
        } while (extrachance--);
        return (-1);
}

static VCL_IP v_matchproto_(vdi_getip_f)
vbe_dir_getip(VRT_CTX, VCL_BACKEND d)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
        pfd = ctx->bo->htc->priv;

        return (VCP_GetIp(pfd));
}

/*--------------------------------------------------------------------*/

static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
        int i;
        stream_close_t retval;
        struct backend *bp;
        struct v1p_acct v1a;
        struct pfd *pfd;
        vtim_real deadline;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        memset(&v1a, 0, sizeof v1a);

        /* This is hackish... */
        v1a.req = ctx->req->acct.req_hdrbytes;
        ctx->req->acct.req_hdrbytes = 0;

        ctx->req->res_pipe = 1;

        retval = SC_TX_ERROR;
        pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

        if (pfd != NULL) {
                CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
                i = V1F_SendReq(ctx->req->wrk, ctx->bo,
                    &v1a.bereq, &v1a.out);
                VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
                if (i == 0) {
                        deadline = ctx->bo->task_deadline;
                        if (isnan(deadline))
                                deadline = cache_param->pipe_task_deadline;
                        if (deadline > 0.)
                                deadline += ctx->req->sp->t_idle;
                        retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
                            deadline);
                }
                VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
                ctx->bo->htc->doclose = retval;
                vbe_dir_finish(ctx, d);
        }
        V1P_Charge(ctx->req, &v1a, bp->vsc);
        CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
        return (retval);
}

/*--------------------------------------------------------------------*/

static void
vbe_dir_event(const struct director *d, enum vcl_event_e ev)
{
        struct backend *bp;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (ev == VCL_EVENT_WARM) {
                VRT_VSC_Reveal(bp->vsc_seg);
                if (bp->probe != NULL)
                        VBP_Control(bp, 1);
        } else if (ev == VCL_EVENT_COLD) {
                if (bp->probe != NULL)
                        VBP_Control(bp, 0);
                VRT_VSC_Hide(bp->vsc_seg);
        } else if (ev == VCL_EVENT_DISCARD) {
                VRT_DelDirector(&bp->director);
        } else if (ev == VDI_EVENT_SICK) {
                const struct vdi_ahealth *ah = d->vdir->admin_health;

                if (ah == VDI_AH_SICK || (ah == VDI_AH_AUTO && bp->sick))
                        vbe_connwait_broadcast(bp);
        }
}

/*---------------------------------------------------------------------*/

static void
vbe_free(struct backend *be)
{

        CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

        if (be->probe != NULL)
                VBP_Remove(be);

        VSC_vbe_Destroy(&be->vsc_seg);
        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend--;
        Lck_Unlock(&backends_mtx);
        VCP_Rel(&be->conn_pool);

#define DA(x)   do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)   /**/
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN
        free(be->endpoint);

        assert(VTAILQ_EMPTY(&be->cw_head));
        FREE_OBJ(be);
}

static void v_matchproto_(vdi_destroy_f)
vbe_destroy(const struct director *d)
{
        struct backend *be;

        CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC);
        vbe_free(be);
}

/*--------------------------------------------------------------------*/

static void
vbe_panic(const struct director *d, struct vsb *vsb)
{
        struct backend *bp;

        PAN_CheckMagic(vsb, d, DIRECTOR_MAGIC);
        bp = d->priv;
        PAN_CheckMagic(vsb, bp, BACKEND_MAGIC);

        VCP_Panic(vsb, bp->conn_pool);
        VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr);
        VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn);
}

/*--------------------------------------------------------------------
 */

static void v_matchproto_(vdi_list_f)
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag,
    int jflag)
{
        char buf[VTCP_ADDRBUFSIZE];
        struct backend *bp;
        struct vrt_endpoint *vep;

        (void)ctx;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(bp->endpoint, VRT_ENDPOINT_MAGIC);

        vep = bp->endpoint;

        if (bp->probe != NULL)
                VBP_Status(vsb, bp, pflag, jflag);
        else if (jflag && pflag)
                VSB_cat(vsb, "{},\n");
        else if (jflag)
                VSB_cat(vsb, "[0, 0, \"healthy\"]");
        else if (pflag)
                return;
        else
                VSB_cat(vsb, "0/0\thealthy");

        if (jflag && pflag) {
                if (vep->ipv4 != NULL) {
                        VTCP_name(vep->ipv4, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv4\": \"%s\",\n", buf);
                }
                if (vep->ipv6 != NULL) {
                        VTCP_name(vep->ipv6, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv6\": \"%s\",\n", buf);
                }
        }
}

/*--------------------------------------------------------------------
 */

static VCL_BOOL v_matchproto_(vdi_healthy_f)
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t)
{
        struct backend *bp;

        (void)ctx;
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (t != NULL)
                *t = bp->changed;

        return (!bp->sick);
}

/*--------------------------------------------------------------------
 */

static const struct vdi_methods vbe_methods[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list,
        .healthy =              vbe_healthy
}};
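
/*
 * Same as vbe_methods, minus the healthy callback: with no probe there
 * is no probe-driven health state to report.
 */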

static const struct vdi_methods vbe_methods_noprobe[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list
}};

/*--------------------------------------------------------------------
 * Create a new static or dynamic director::backend instance.
 */

size_t
VRT_backend_vsm_need(VRT_CTX)
{
        (void)ctx;
        return (VRT_VSC_Overhead(VSC_vbe_size));
}

/*
 * The new_backend via parameter is a VCL_BACKEND, but we need a (struct
 * backend)
 *
 * For now, we resolve it when creating the backend, which implies no redundancy
 * / load balancing across the via director if it is more than a simple backend.
 */
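
/*
 * A minimal VCL sketch of the intended use (hypothetical addresses; this
 * assumes the VCL-level .via backend attribute is what arrives here as
 * the via argument):
 *
 *     backend detour { .host = "192.0.2.1"; }
 *     backend origin { .host = "origin.example.com"; .via = detour; }
 */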

static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

        if (vep->uds_path) {
                VRT_fail(ctx, "Via is only supported for IP addresses");
                return (NULL);
        }

        via = VRT_DirectorResolve(ctx, via);

        if (via == NULL) {
                VRT_fail(ctx, "Via resolution failed");
                return (NULL);
        }

        CHECK_OBJ(via, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

        if (via->vdir->methods == vbe_methods ||
            via->vdir->methods == vbe_methods_noprobe)
                CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

        if (viabe == NULL)
                VRT_fail(ctx, "Via does not resolve to a backend");

        return (viabe);
}

/*
 * construct a new endpoint identical to vep with sa in a proxy header
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
        struct vsb *preamble;
        struct vrt_blob blob[1];
        struct vrt_endpoint *nvep, *ret;
        const struct suckaddr *client_bogo;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(sa);

        nvep = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

        if (VSA_Get_Proto(sa) == AF_INET6)
                client_bogo = bogo_ip6;
        else
                client_bogo = bogo_ip;

        preamble = VSB_new_auto();
        AN(preamble);
        VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
        blob->blob = VSB_data(preamble);
        blob->len = VSB_len(preamble);
        nvep->preamble = blob;
        ret = VRT_Endpoint_Clone(nvep);
        CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
        VSB_destroy(&preamble);
        FREE_OBJ(nvep);

        return (ret);
}

VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
        struct backend *be;
        struct vcl *vcl;
        const struct vrt_backend_probe *vbp;
        const struct vrt_endpoint *vep;
        const struct vdi_methods *m;
        const struct suckaddr *sa = NULL;
        char abuf[VTCP_ADDRBUFSIZE];
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        vep = vrt->endpoint;
        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        if (vep->uds_path == NULL) {
                if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
                        VRT_fail(ctx, "%s: Illegal IP", __func__);
                        return (NULL);
                }
        } else {
                assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
        }

        if (via != NULL) {
                viabe = via_resolve(ctx, vep, via);
                if (viabe == NULL)
                        return (NULL);
        }

        vcl = ctx->vcl;
        AN(vcl);
        AN(vrt->vcl_name);

        /* Create new backend */
        ALLOC_OBJ(be, BACKEND_MAGIC);
        if (be == NULL)
                return (NULL);
        VTAILQ_INIT(&be->cw_head);

#define DA(x)   do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)   do { be->x = vrt->x; } while (0)
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN

#define CPTMO(a, b, x) do {                             \
                if ((a)->x < 0.0 || isnan((a)->x))      \
                        (a)->x = (b)->x;                \
        } while(0)

        if (viabe != NULL) {
                CPTMO(be, viabe, connect_timeout);
                CPTMO(be, viabe, first_byte_timeout);
                CPTMO(be, viabe, between_bytes_timeout);
        }
#undef CPTMO
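
        /*
         * Pick the address used both for a default Host: header (when
         * the VCL did not provide one) and, for via backends, as the
         * destination address carried in the PROXY preamble; a UDS
         * endpoint is represented by bogo_ip.
         */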

        if (viabe || be->hosthdr == NULL) {
                if (vrt->endpoint->uds_path != NULL)
                        sa = bogo_ip;
                else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
                        sa = vep->ipv6;
                else if (vep->ipv4 != NULL)
                        sa = vep->ipv4;
                else
                        sa = vep->ipv6;
                if (be->hosthdr == NULL) {
                        VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
                        REPLACE(be->hosthdr, abuf);
                }
        }

        be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
            "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
        AN(be->vsc);
        if (! vcl->temp->is_warm)
                VRT_VSC_Hide(be->vsc_seg);

        if (viabe)
                vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
                    be->authority);
        else
                vep = be->endpoint = VRT_Endpoint_Clone(vep);

        AN(vep);
        be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
        AN(be->conn_pool);

        vbp = vrt->probe;
        if (vbp == NULL)
                vbp = VCL_DefaultProbe(vcl);

        if (vbp != NULL) {
                VBP_Insert(be, vbp, be->conn_pool);
                m = vbe_methods;
        } else {
                be->sick = 0;
                be->vsc->happy = UINT64_MAX;
                m = vbe_methods_noprobe;
        }

        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend++;
        Lck_Unlock(&backends_mtx);

        be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

        if (be->director == NULL) {
                vbe_free(be);
                return (NULL);
        }
        /* for cold VCL, update initial director state */
        if (be->probe != NULL)
                VBP_Update_Backend(be->probe);
        return (be->director);
}

VCL_BACKEND
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via)
{

        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC);
        return (VRT_new_backend_clustered(ctx, NULL, vrt, via));
}

/*--------------------------------------------------------------------
 * Delete a dynamic director::backend instance.  Undeleted dynamic and
 * static instances are GC'ed when the VCL is discarded (in cache_vcl.c)
 */

void
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp)
{

        (void)ctx;
        CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC);
        VRT_DisableDirector(*dp);
        VRT_Assign_Backend(dp, NULL);
}

/*---------------------------------------------------------------------*/

void
VBE_InitCfg(void)
{

        Lck_New(&backends_mtx, lck_vbe);
}