varnish-cache/bin/varnishd/cache/cache_conn_pool.c
/*-
 * Copyright (c) 2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * (TCP|UDS) connection pools.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"

#include "vsa.h"
#include "vsha256.h"
#include "vtcp.h"
#include "vus.h"
#include "vtim.h"
#include "waiter/waiter.h"

#include "cache_conn_pool.h"
#include "cache_pool.h"

#include "VSC_vcp.h"

struct conn_pool;
static inline int vcp_cmp(const struct conn_pool *a, const struct conn_pool *b);

/*--------------------------------------------------------------------
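 * A pooled connection: the open file descriptor, the address it was
 * opened to, its PFD_STATE_* lifecycle state, and the waiter registration
 * used while the connection sits idle in the pool.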
 */

struct pfd {
        unsigned                magic;
#define PFD_MAGIC               0x0c5e6593
        int                     fd;
        VTAILQ_ENTRY(pfd)       list;
        VCL_IP                  addr;
        uint8_t                 state;
        struct waited           waited[1];
        struct conn_pool        *conn_pool;

        pthread_cond_t          *cond;
};

/*--------------------------------------------------------------------
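 * Per-transport methods: how to open, close and name a connection.  Two
 * implementations follow below, vtp_methods for TCP and vus_methods for
 * Unix domain sockets.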
 */

typedef int cp_open_f(const struct conn_pool *, vtim_dur tmo, VCL_IP *ap);
typedef void cp_close_f(struct pfd *);
typedef void cp_name_f(const struct pfd *, char *, unsigned, char *, unsigned);

struct cp_methods {
        cp_open_f                               *open;
        cp_close_f                              *close;
        cp_name_f                               *local_name;
        cp_name_f                               *remote_name;
};

struct conn_pool {
        unsigned                                magic;
#define CONN_POOL_MAGIC                         0x85099bc3

        const struct cp_methods                 *methods;

        struct vrt_endpoint                     *endpoint;
        char                                    ident[VSHA256_DIGEST_LENGTH];

        VRBT_ENTRY(conn_pool)                   entry;
        int                                     refcnt;
        struct lock                             mtx;

        VTAILQ_HEAD(, pfd)                      connlist;
        int                                     n_conn;

        int                                     n_kill;

        int                                     n_used;

        vtim_mono                               holddown;
        int                                     holddown_errno;
};

static struct lock conn_pools_mtx;
static struct lock dead_pools_mtx;
static struct VSC_vcp *vsc;

VRBT_HEAD(vrb, conn_pool);
VRBT_GENERATE_REMOVE_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_REMOVE(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT_FINISH(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT(vrb, conn_pool, entry, vcp_cmp, static)
VRBT_GENERATE_NEXT(vrb, conn_pool, entry, static)
VRBT_GENERATE_MINMAX(vrb, conn_pool, entry, static)

static struct vrb conn_pools = VRBT_INITIALIZER(&conn_pools);
static struct vrb dead_pools = VRBT_INITIALIZER(&dead_pools);

/*--------------------------------------------------------------------
 */

unsigned
PFD_State(const struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (p->state);
}

int *
PFD_Fd(struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (&(p->fd));
}

void
PFD_LocalName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
              unsigned plen)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
        p->conn_pool->methods->local_name(p, abuf, alen, pbuf, plen);
}

void
PFD_RemoteName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
               unsigned plen)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
        p->conn_pool->methods->remote_name(p, abuf, alen, pbuf, plen);
}

/*--------------------------------------------------------------------
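 * Connection pools are kept in the conn_pools rb-tree, ordered by their
 * SHA256 identity digest (cp->ident), which VCP_Ref() computes from the
 * endpoint and the caller-supplied ident string.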
 */

static inline int
vcp_cmp(const struct conn_pool *a, const struct conn_pool *b)
{
        return (memcmp(a->ident, b->ident, sizeof b->ident));
}

/*--------------------------------------------------------------------
 * Waiter-handler
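 *
 * Called by the waiter when an idle pooled connection sees an event or
 * times out: hand a stolen connection over to the waiting worker, close
 * and free an idle one, or finish tearing down one marked for cleanup.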
 */

static void  v_matchproto_(waiter_handle_f)
vcp_handle(struct waited *w, enum wait_event ev, vtim_real now)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        CHECK_OBJ_NOTNULL(w, WAITED_MAGIC);
        CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
        (void)ev;
        (void)now;
        CHECK_OBJ_NOTNULL(pfd->conn_pool, CONN_POOL_MAGIC);
        cp = pfd->conn_pool;

        Lck_Lock(&cp->mtx);

        switch (pfd->state) {
        case PFD_STATE_STOLEN:
                pfd->state = PFD_STATE_USED;
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                AN(pfd->cond);
                PTOK(pthread_cond_signal(pfd->cond));
                break;
        case PFD_STATE_AVAIL:
                cp->methods->close(pfd);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                FREE_OBJ(pfd);
                break;
        case PFD_STATE_CLEANUP:
                cp->methods->close(pfd);
                cp->n_kill--;
                memset(pfd, 0x11, sizeof *pfd);
                free(pfd);
                break;
        default:
                WRONG("Wrong pfd state");
        }
        Lck_Unlock(&cp->mtx);
}


/*--------------------------------------------------------------------
 */

void
VCP_AddRef(struct conn_pool *cp)
{
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        cp->refcnt++;
        Lck_Unlock(&conn_pools_mtx);
}

/*--------------------------------------------------------------------
 */

static void
vcp_destroy(struct conn_pool **cpp)
{
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);
        AZ(cp->n_conn);
        AZ(cp->n_kill);
        Lck_Delete(&cp->mtx);
        FREE_OBJ(cp->endpoint);
        FREE_OBJ(cp);
}

/*--------------------------------------------------------------------
 * Release a reference on a conn pool; on the last reference, destroy it
 * or stash it for future destruction.
 */

void
VCP_Rel(struct conn_pool **cpp)
{
        struct conn_pool *cp;
        struct pfd *pfd, *pfd2;
        int n_kill;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        if (--cp->refcnt > 0) {
                Lck_Unlock(&conn_pools_mtx);
                return;
        }
        AZ(cp->n_used);
        VRBT_REMOVE(vrb, &conn_pools, cp);
        Lck_Unlock(&conn_pools_mtx);

        Lck_Lock(&cp->mtx);
        VTAILQ_FOREACH_SAFE(pfd, &cp->connlist, list, pfd2) {
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                assert(pfd->state == PFD_STATE_AVAIL);
                pfd->state = PFD_STATE_CLEANUP;
                (void)shutdown(pfd->fd, SHUT_RDWR);
                cp->n_kill++;
        }
        n_kill = cp->n_kill;
        Lck_Unlock(&cp->mtx);
        if (n_kill == 0) {
                vcp_destroy(&cp);
                return;
        }
        Lck_Lock(&dead_pools_mtx);
        /*
         * Here we reuse cp's entry but it will probably not be correctly
         * indexed because of the hack in VCP_RelPoll
         */
        VRBT_INSERT(vrb, &dead_pools, cp);
        Lck_Unlock(&dead_pools_mtx);
}
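
/*--------------------------------------------------------------------
 * Reap pools stashed by VCP_Rel() once the waiter has killed off their
 * remaining connections; pools still waiting are put back for a later
 * poll.  Runs on the CLI thread.
 */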

void
VCP_RelPoll(void)
{
        struct vrb dead;
        struct conn_pool *cp, *cp2;
        int n_kill;

        ASSERT_CLI();

        Lck_Lock(&dead_pools_mtx);
        if (VRBT_EMPTY(&dead_pools)) {
                Lck_Unlock(&dead_pools_mtx);
                return;
        }
        dead = dead_pools;
        VRBT_INIT(&dead_pools);
        Lck_Unlock(&dead_pools_mtx);

        VRBT_FOREACH_SAFE(cp, vrb, &dead, cp2) {
                CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
                Lck_Lock(&cp->mtx);
                n_kill = cp->n_kill;
                Lck_Unlock(&cp->mtx);
                if (n_kill > 0)
                        continue;
                VRBT_REMOVE(vrb, &dead, cp);
                vcp_destroy(&cp);
        }

        if (VRBT_EMPTY(&dead))
                return;

        Lck_Lock(&dead_pools_mtx);
        /*
         * The following insertion will most likely result in an
         * unordered tree, but in this case it does not matter
         * as we just want to iterate over all the elements
         * in the tree in order to delete them.
         */
        VRBT_INSERT(vrb, &dead_pools, dead.rbh_root);
        Lck_Unlock(&dead_pools_mtx);
}

/*--------------------------------------------------------------------
 * Recycle a connection.
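 *
 * The connection is handed to the pool's waiter with the backend idle
 * timeout; vcp_handle() will close and free it if it sees an event or
 * times out before it is reused.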
 */

void
VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;
        int i = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->state == PFD_STATE_USED);
        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        cp->n_used--;

        pfd->waited->priv1 = pfd;
        pfd->waited->fd = pfd->fd;
        pfd->waited->idle = VTIM_real();
        pfd->state = PFD_STATE_AVAIL;
        pfd->waited->func = vcp_handle;
        pfd->waited->tmo = cache_param->backend_idle_timeout;
        if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
                cp->methods->close(pfd);
                memset(pfd, 0x33, sizeof *pfd);
                free(pfd);
                // XXX: stats
                pfd = NULL;
        } else {
                VTAILQ_INSERT_HEAD(&cp->connlist, pfd, list);
                i++;
        }

        if (pfd != NULL)
                cp->n_conn++;
        Lck_Unlock(&cp->mtx);

        if (i && DO_DEBUG(DBG_VTC_MODE)) {
                /*
                 * In varnishtest we do not have the luxury of using
                 * multiple backend connections, so whenever we end up
                 * in the "pending" case, take a short nap to let the
                 * waiter catch up and put the pfd back into circulation.
                 *
                 * In particular ESI:include related tests suffer random
                 * failures without this.
                 *
                 * In normal operation, the only effect is that we will
                 * have N+1 backend connections rather than N, which is
                 * entirely harmless.
                 */
                VTIM_sleep(0.01);
        }
}

/*--------------------------------------------------------------------
 * Open a new connection from pool.
 */

int
VCP_Open(struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap, int *err)
{
        int r;
        vtim_mono h;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        AN(err);
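
        /*
         * A pool in holddown recently failed to connect; fail immediately
         * with the saved errno rather than retrying the backend on every
         * request until the holddown expires.
         */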

        while (cp->holddown > 0) {
                Lck_Lock(&cp->mtx);
                if (cp->holddown == 0) {
                        Lck_Unlock(&cp->mtx);
                        break;
                }

                if (VTIM_mono() >= cp->holddown) {
                        cp->holddown = 0;
                        Lck_Unlock(&cp->mtx);
                        break;
                }

                *err = 0;
                errno = cp->holddown_errno;
                Lck_Unlock(&cp->mtx);
                return (-1);
        }

        *err = errno = 0;
        r = cp->methods->open(cp, tmo, ap);

        if (r >= 0 && errno == 0 && cp->endpoint->preamble != NULL &&
             cp->endpoint->preamble->len > 0) {
                CHECK_OBJ(cp->endpoint->preamble, VRT_BLOB_MAGIC);
                if (write(r, cp->endpoint->preamble->blob,
                    cp->endpoint->preamble->len) !=
                    cp->endpoint->preamble->len) {
                        *err = errno;
                        closefd(&r);
                }
        } else {
                *err = errno;
        }

        if (r >= 0)
                return (r);

        h = 0;

        switch (errno) {
        case EACCES:
        case EPERM:
                h = cache_param->backend_local_error_holddown;
                break;
        case EADDRNOTAVAIL:
                h = cache_param->backend_local_error_holddown;
                break;
        case ECONNREFUSED:
                h = cache_param->backend_remote_error_holddown;
                break;
        case ENETUNREACH:
                h = cache_param->backend_remote_error_holddown;
                break;
        default:
                break;
        }

        if (h == 0)
                return (r);

        Lck_Lock(&cp->mtx);
        h += VTIM_mono();
        if (cp->holddown == 0 || h < cp->holddown) {
                cp->holddown = h;
                cp->holddown_errno = errno;
        }

        Lck_Unlock(&cp->mtx);

        return (r);
}

/*--------------------------------------------------------------------
 * Close a connection.
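 *
 * A connection that is still stolen (owned by the waiter) is only shut
 * down and marked for cleanup here; vcp_handle() closes and frees it once
 * the waiter fires.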
 */

void
VCP_Close(struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
        cp->n_used--;
        if (pfd->state == PFD_STATE_STOLEN) {
                (void)shutdown(pfd->fd, SHUT_RDWR);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                pfd->state = PFD_STATE_CLEANUP;
                cp->n_kill++;
        } else {
                assert(pfd->state == PFD_STATE_USED);
                cp->methods->close(pfd);
                memset(pfd, 0x44, sizeof *pfd);
                free(pfd);
        }
        Lck_Unlock(&cp->mtx);
}

/*--------------------------------------------------------------------
 * Get a connection, possibly recycled
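 *
 * A recycled connection is returned in PFD_STATE_STOLEN while it is still
 * registered with the waiter; callers use VCP_Wait() to wait for the
 * hand-over before reading from it.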
 */

struct pfd *
VCP_Get(struct conn_pool *cp, vtim_dur tmo, struct worker *wrk,
    unsigned force_fresh, int *err)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        AN(err);

        *err = 0;
        Lck_Lock(&cp->mtx);
        pfd = VTAILQ_FIRST(&cp->connlist);
        CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
        if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN) {
                pfd = NULL;
        } else {
                assert(pfd->conn_pool == cp);
                assert(pfd->state == PFD_STATE_AVAIL);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                VTAILQ_INSERT_TAIL(&cp->connlist, pfd, list);
                cp->n_conn--;
                VSC_C_main->backend_reuse++;
                pfd->state = PFD_STATE_STOLEN;
                pfd->cond = &wrk->cond;
        }
        cp->n_used++;                   // Opening mostly works
        Lck_Unlock(&cp->mtx);

        if (pfd != NULL)
                return (pfd);

        ALLOC_OBJ(pfd, PFD_MAGIC);
        AN(pfd);
        INIT_OBJ(pfd->waited, WAITED_MAGIC);
        pfd->state = PFD_STATE_USED;
        pfd->conn_pool = cp;
        pfd->fd = VCP_Open(cp, tmo, &pfd->addr, err);
        if (pfd->fd < 0) {
                FREE_OBJ(pfd);
                Lck_Lock(&cp->mtx);
                cp->n_used--;           // Nope, didn't work after all.
                Lck_Unlock(&cp->mtx);
        } else
                VSC_C_main->backend_conn++;

        return (pfd);
}

/*--------------------------------------------------------------------
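 * Wait for the waiter to hand over a connection stolen by VCP_Get(): block
 * on the worker's condvar until vcp_handle() marks the pfd PFD_STATE_USED,
 * or return 1 if the deadline passes first.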
 */

int
VCP_Wait(struct worker *wrk, struct pfd *pfd, vtim_real when)
{
        struct conn_pool *cp;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        assert(pfd->cond == &wrk->cond);
        Lck_Lock(&cp->mtx);
        while (pfd->state == PFD_STATE_STOLEN) {
                r = Lck_CondWaitUntil(&wrk->cond, &cp->mtx, when);
                if (r != 0) {
                        if (r == EINTR)
                                continue;
                        assert(r == ETIMEDOUT);
                        Lck_Unlock(&cp->mtx);
                        return (1);
                }
        }
        assert(pfd->state == PFD_STATE_USED);
        pfd->cond = NULL;
        Lck_Unlock(&cp->mtx);

        return (0);
}

/*--------------------------------------------------------------------
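 * Return the address the pooled connection was opened to (bogo_ip for a
 * UDS endpoint).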
 */

VCL_IP
VCP_GetIp(struct pfd *pfd)
{

        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        return (pfd->addr);
}

/*--------------------------------------------------------------------*/

static void
vcp_panic_endpoint(struct vsb *vsb, const struct vrt_endpoint *vep)
{
        char h[VTCP_ADDRBUFSIZE];
        char p[VTCP_PORTBUFSIZE];

        if (PAN_dump_struct(vsb, vep, VRT_ENDPOINT_MAGIC, "vrt_endpoint"))
                return;
        if (vep->uds_path)
                VSB_printf(vsb, "uds_path = %s,\n", vep->uds_path);
        if (vep->ipv4 && VSA_Sane(vep->ipv4)) {
                VTCP_name(vep->ipv4, h, sizeof h, p, sizeof p);
                VSB_printf(vsb, "ipv4 = %s, ", h);
                VSB_printf(vsb, "port = %s,\n", p);
        }
        if (vep->ipv6 && VSA_Sane(vep->ipv6)) {
                VTCP_name(vep->ipv6, h, sizeof h, p, sizeof p);
                VSB_printf(vsb, "ipv6 = %s, ", h);
                VSB_printf(vsb, "port = %s,\n", p);
        }
        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}

void
VCP_Panic(struct vsb *vsb, struct conn_pool *cp)
{

        if (PAN_dump_struct(vsb, cp, CONN_POOL_MAGIC, "conn_pool"))
                return;
        VSB_cat(vsb, "ident = ");
        VSB_quote(vsb, cp->ident, VSHA256_DIGEST_LENGTH, VSB_QUOTE_HEX);
        VSB_cat(vsb, ",\n");
        vcp_panic_endpoint(vsb, cp->endpoint);
        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}

/*--------------------------------------------------------------------*/

void
VCP_Init(void)
{
        Lck_New(&conn_pools_mtx, lck_conn_pool);
        Lck_New(&dead_pools_mtx, lck_dead_pool);

        AZ(vsc);
        vsc = VSC_vcp_New(NULL, NULL, "");
        AN(vsc);
}

/**********************************************************************/
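
/* Convert a timeout in (fractional) seconds to whole milliseconds. */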

static inline int
tmo2msec(vtim_dur tmo)
{
        return ((int)floor(tmo * 1000.0));
}
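
/*
 * TCP transport: try the preferred address family first (prefer_ipv6),
 * then fall back to the other one if the connect fails.
 */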

static int v_matchproto_(cp_open_f)
vtp_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        msec = tmo2msec(tmo);
        if (cache_param->prefer_ipv6) {
                *ap = cp->endpoint->ipv6;
                s = VTCP_connect(*ap, msec);
                if (s >= 0)
                        return (s);
        }
        *ap = cp->endpoint->ipv4;
        s = VTCP_connect(*ap, msec);
        if (s >= 0)
                return (s);
        if (!cache_param->prefer_ipv6) {
                *ap = cp->endpoint->ipv6;
                s = VTCP_connect(*ap, msec);
        }
        return (s);
}


/*--------------------------------------------------------------------*/

static void v_matchproto_(cp_close_f)
vtp_close(struct pfd *pfd)
{

        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_close(&pfd->fd);
}

static void v_matchproto_(cp_name_f)
vtp_local_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
               unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_myname(pfd->fd, addr, alen, pbuf, plen);
}

static void v_matchproto_(cp_name_f)
vtp_remote_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
                unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_hisname(pfd->fd, addr, alen, pbuf, plen);
}

static const struct cp_methods vtp_methods = {
        .open = vtp_open,
        .close = vtp_close,
        .local_name = vtp_local_name,
        .remote_name = vtp_remote_name,
};

/*--------------------------------------------------------------------
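 * Unix domain socket transport: connect via VUS_connect() and report the
 * placeholder address 0.0.0.0:0, since a UDS has no IP endpoint.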
 */

static int v_matchproto_(cp_open_f)
vus_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        AN(cp->endpoint->uds_path);

        msec = tmo2msec(tmo);
        *ap = bogo_ip;
        s = VUS_connect(cp->endpoint->uds_path, msec);
        return (s);
}

static void v_matchproto_(cp_name_f)
vus_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
         unsigned plen)
{
        (void) pfd;
        assert(alen > strlen("0.0.0.0"));
        assert(plen > 1);
        strcpy(addr, "0.0.0.0");
        strcpy(pbuf, "0");
}

static const struct cp_methods vus_methods = {
        .open = vus_open,
        .close = vtp_close,
        .local_name = vus_name,
        .remote_name = vus_name,
};

/*--------------------------------------------------------------------
 * Reference a conn pool given by an {ip4, ip6} pair or a UDS path.
 * Create it if it doesn't exist already.
 */

struct conn_pool *
VCP_Ref(const struct vrt_endpoint *vep, const char *ident)
{
        struct conn_pool *cp, *cp2;
        struct VSHA256Context cx[1];
        unsigned char digest[VSHA256_DIGEST_LENGTH];

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(ident);
        AN(vsc);

        VSHA256_Init(cx);
        VSHA256_Update(cx, ident, strlen(ident) + 1); // include \0
        if (vep->uds_path != NULL) {
                AZ(vep->ipv4);
                AZ(vep->ipv6);
                VSHA256_Update(cx, "UDS", 4); // include \0
                VSHA256_Update(cx, vep->uds_path, strlen(vep->uds_path));
        } else {
                assert(vep->ipv4 != NULL || vep->ipv6 != NULL);
                if (vep->ipv4 != NULL) {
                        assert(VSA_Sane(vep->ipv4));
                        VSHA256_Update(cx, "IP4", 4); // include \0
                        VSHA256_Update(cx, vep->ipv4, vsa_suckaddr_len);
                }
                if (vep->ipv6 != NULL) {
                        assert(VSA_Sane(vep->ipv6));
                        VSHA256_Update(cx, "IP6", 4); // include \0
                        VSHA256_Update(cx, vep->ipv6, vsa_suckaddr_len);
                }
        }
        CHECK_OBJ_ORNULL(vep->preamble, VRT_BLOB_MAGIC);
        if (vep->preamble != NULL && vep->preamble->len > 0) {
                VSHA256_Update(cx, "PRE", 4); // include \0
                VSHA256_Update(cx, vep->preamble->blob, vep->preamble->len);
        }
        VSHA256_Final(digest, cx);

        ALLOC_OBJ(cp, CONN_POOL_MAGIC);
        AN(cp);
        cp->refcnt = 1;
        cp->holddown = 0;
        cp->endpoint = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(cp->endpoint, VRT_ENDPOINT_MAGIC);
        memcpy(cp->ident, digest, sizeof cp->ident);
        if (vep->uds_path != NULL)
                cp->methods = &vus_methods;
        else
                cp->methods = &vtp_methods;
        Lck_New(&cp->mtx, lck_conn_pool);
        VTAILQ_INIT(&cp->connlist);

        Lck_Lock(&conn_pools_mtx);
        cp2 = VRBT_INSERT(vrb, &conn_pools, cp);
        if (cp2 == NULL) {
                vsc->ref_miss++;
                Lck_Unlock(&conn_pools_mtx);
                return (cp);
        }

        CHECK_OBJ(cp2, CONN_POOL_MAGIC);
        assert(cp2->refcnt > 0);
        cp2->refcnt++;
        vsc->ref_hit++;
        Lck_Unlock(&conn_pools_mtx);

        vcp_destroy(&cp);
        return (cp2);
}