varnish-cache/bin/varnishd/cache/cache_conn_pool.c
/*-
 * Copyright (c) 2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * (TCP|UDS) connection pools.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"

#include "vsa.h"
#include "vsha256.h"
#include "vtcp.h"
#include "vus.h"
#include "vtim.h"
#include "waiter/waiter.h"

#include "cache_conn_pool.h"
#include "cache_pool.h"

#include "VSC_vcp.h"

struct conn_pool;
static inline int vcp_cmp(const struct conn_pool *a, const struct conn_pool *b);

/*--------------------------------------------------------------------
 */

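/*
 * A pooled file descriptor: one backend connection.  While idle it sits
 * on its pool's connlist and is watched by a waiter; `state` tracks
 * whether it is available, stolen (claimed by a worker but not yet
 * handed over by the waiter), in use, or scheduled for cleanup.  `cond`
 * points to the stealing worker's condvar so the waiter can signal the
 * hand-over.
 */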
struct pfd {
        unsigned                magic;
#define PFD_MAGIC               0x0c5e6593
        int                     fd;
        VTAILQ_ENTRY(pfd)       list;
        VCL_IP                  addr;
        uint8_t                 state;
        struct waited           waited[1];
        struct conn_pool        *conn_pool;

        pthread_cond_t          *cond;
};

/*--------------------------------------------------------------------
 */

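/*
 * Per-transport methods: how to open a connection, how to close it, and
 * how to render the local/remote endpoint names.  Two implementations
 * follow below, vtp_methods for TCP and vus_methods for Unix domain
 * sockets.
 */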
typedef int cp_open_f(const struct conn_pool *, vtim_dur tmo, VCL_IP *ap);
typedef void cp_close_f(struct pfd *);
typedef void cp_name_f(const struct pfd *, char *, unsigned, char *, unsigned);

struct cp_methods {
        cp_open_f                               *open;
        cp_close_f                              *close;
        cp_name_f                               *local_name;
        cp_name_f                               *remote_name;
};

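/*
 * A connection pool, keyed by the SHA256 `ident` digest computed in
 * VCP_Ref() and kept in the global `conn_pools` red-black tree.  The
 * pool is reference counted; `mtx` protects the connection list, the
 * n_conn/n_kill/n_used counters and the holddown state.
 */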
struct conn_pool {
        unsigned                                magic;
#define CONN_POOL_MAGIC                         0x85099bc3

        const struct cp_methods                 *methods;

        struct vrt_endpoint                     *endpoint;
        char                                    ident[VSHA256_DIGEST_LENGTH];

        VRBT_ENTRY(conn_pool)                   entry;
        int                                     refcnt;
        struct lock                             mtx;

        VTAILQ_HEAD(, pfd)                      connlist;
        int                                     n_conn;

        int                                     n_kill;

        int                                     n_used;

        vtim_mono                               holddown;
        int                                     holddown_errno;
};

static struct lock conn_pools_mtx;
static struct lock dead_pools_mtx;
static struct VSC_vcp *vsc;

VRBT_HEAD(vrb, conn_pool);
VRBT_GENERATE_REMOVE_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_REMOVE(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT_FINISH(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT(vrb, conn_pool, entry, vcp_cmp, static)
VRBT_GENERATE_NEXT(vrb, conn_pool, entry, static)
VRBT_GENERATE_MINMAX(vrb, conn_pool, entry, static)

static struct vrb conn_pools = VRBT_INITIALIZER(&conn_pools);
static struct vrb dead_pools = VRBT_INITIALIZER(&dead_pools);

/*--------------------------------------------------------------------
 */

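/* Accessors for the otherwise opaque struct pfd. */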
unsigned
PFD_State(const struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (p->state);
}

int *
PFD_Fd(struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (&(p->fd));
}

void
PFD_LocalName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
              unsigned plen)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
        p->conn_pool->methods->local_name(p, abuf, alen, pbuf, plen);
}

void
PFD_RemoteName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
               unsigned plen)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
        p->conn_pool->methods->remote_name(p, abuf, alen, pbuf, plen);
}

/*--------------------------------------------------------------------
 */

static inline int
vcp_cmp(const struct conn_pool *a, const struct conn_pool *b)
{
        return (memcmp(a->ident, b->ident, sizeof b->ident));
}

/*--------------------------------------------------------------------
 * Waiter-handler
 */

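/*
 * Called by the waiter for an idle pooled connection (the event and
 * timestamp are ignored): a stolen pfd is handed over to the waiting
 * worker, an available one is closed and dropped from the pool, and a
 * cleanup one is finally freed.
 */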
static void  v_matchproto_(waiter_handle_f)
vcp_handle(struct waited *w, enum wait_event ev, vtim_real now)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        CHECK_OBJ_NOTNULL(w, WAITED_MAGIC);
        CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
        (void)ev;
        (void)now;
        CHECK_OBJ_NOTNULL(pfd->conn_pool, CONN_POOL_MAGIC);
        cp = pfd->conn_pool;

        Lck_Lock(&cp->mtx);

        switch (pfd->state) {
        case PFD_STATE_STOLEN:
                pfd->state = PFD_STATE_USED;
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                AN(pfd->cond);
                PTOK(pthread_cond_signal(pfd->cond));
                break;
        case PFD_STATE_AVAIL:
                cp->methods->close(pfd);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                FREE_OBJ(pfd);
                break;
        case PFD_STATE_CLEANUP:
                cp->methods->close(pfd);
                cp->n_kill--;
                memset(pfd, 0x11, sizeof *pfd);
                free(pfd);
                break;
        default:
                WRONG("Wrong pfd state");
        }
        Lck_Unlock(&cp->mtx);
}


/*--------------------------------------------------------------------
 */

void
VCP_AddRef(struct conn_pool *cp)
{
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        cp->refcnt++;
        Lck_Unlock(&conn_pools_mtx);
}

/*--------------------------------------------------------------------
 */

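/*
 * Final teardown of a pool: by now both the pooled connections (n_conn)
 * and those scheduled for cleanup (n_kill) must already have been reaped.
 */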
static void
vcp_destroy(struct conn_pool **cpp)
{
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);
        AZ(cp->n_conn);
        AZ(cp->n_kill);
        Lck_Delete(&cp->mtx);
        FREE_OBJ(cp->endpoint);
        FREE_OBJ(cp);
}

/*--------------------------------------------------------------------
 * Release Conn pool, destroy or stash for future destruction if last
 * reference.
 */

void
VCP_Rel(struct conn_pool **cpp)
{
        struct conn_pool *cp;
        struct pfd *pfd, *pfd2;
        int n_kill;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        if (--cp->refcnt > 0) {
                Lck_Unlock(&conn_pools_mtx);
                return;
        }
        AZ(cp->n_used);
        VRBT_REMOVE(vrb, &conn_pools, cp);
        Lck_Unlock(&conn_pools_mtx);

        Lck_Lock(&cp->mtx);
        VTAILQ_FOREACH_SAFE(pfd, &cp->connlist, list, pfd2) {
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                assert(pfd->state == PFD_STATE_AVAIL);
                pfd->state = PFD_STATE_CLEANUP;
                (void)shutdown(pfd->fd, SHUT_RDWR);
                cp->n_kill++;
        }
        n_kill = cp->n_kill;
        Lck_Unlock(&cp->mtx);
        if (n_kill == 0) {
                vcp_destroy(&cp);
                return;
        }
        Lck_Lock(&dead_pools_mtx);
        /*
         * Here we reuse cp's entry but it will probably not be correctly
         * indexed because of the hack in VCP_RelPoll
         */
        VRBT_INSERT(vrb, &dead_pools, cp);
        Lck_Unlock(&dead_pools_mtx);
}

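/*
 * CLI-thread housekeeping: sweep the stash of dead pools and destroy
 * those whose killed connections have all been reaped by the waiter;
 * the rest are put back for a later poll.
 */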
void
VCP_RelPoll(void)
{
        struct vrb dead;
        struct conn_pool *cp, *cp2;
        int n_kill;

        ASSERT_CLI();

        Lck_Lock(&dead_pools_mtx);
        if (VRBT_EMPTY(&dead_pools)) {
                Lck_Unlock(&dead_pools_mtx);
                return;
        }
        dead = dead_pools;
        VRBT_INIT(&dead_pools);
        Lck_Unlock(&dead_pools_mtx);

        VRBT_FOREACH_SAFE(cp, vrb, &dead, cp2) {
                CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
                Lck_Lock(&cp->mtx);
                n_kill = cp->n_kill;
                Lck_Unlock(&cp->mtx);
                if (n_kill > 0)
                        continue;
                VRBT_REMOVE(vrb, &dead, cp);
                vcp_destroy(&cp);
        }

        if (VRBT_EMPTY(&dead))
                return;

        Lck_Lock(&dead_pools_mtx);
        /*
         * The following insertion will most likely result in an
         * unordered tree, but in this case it does not matter
         * as we just want to iterate over all the elements
         * in the tree in order to delete them.
         */
        VRBT_INSERT(vrb, &dead_pools, dead.rbh_root);
        Lck_Unlock(&dead_pools_mtx);
}

/*--------------------------------------------------------------------
 * Recycle a connection.
 */

void
VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;
        int i = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->state == PFD_STATE_USED);
        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        cp->n_used--;

        pfd->waited->priv1 = pfd;
        pfd->waited->fd = pfd->fd;
        pfd->waited->idle = VTIM_real();
        pfd->state = PFD_STATE_AVAIL;
        pfd->waited->func = vcp_handle;
        pfd->waited->tmo = cache_param->backend_idle_timeout;
        if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
                cp->methods->close(pfd);
                memset(pfd, 0x33, sizeof *pfd);
                free(pfd);
                // XXX: stats
                pfd = NULL;
        } else {
                VTAILQ_INSERT_HEAD(&cp->connlist, pfd, list);
                i++;
        }

        if (pfd != NULL)
                cp->n_conn++;
        Lck_Unlock(&cp->mtx);

        if (i && DO_DEBUG(DBG_VTC_MODE)) {
                /*
                 * In varnishtest we do not have the luxury of using
                 * multiple backend connections, so whenever we end up
                 * in the "pending" case, take a short nap to let the
                 * waiter catch up and put the pfd back into circulation.
                 *
                 * In particular ESI:include related tests suffer random
                 * failures without this.
                 *
                 * In normal operation, the only effect is that we will
                 * have N+1 backend connections rather than N, which is
                 * entirely harmless.
                 */
                VTIM_sleep(0.01);
        }
}

/*--------------------------------------------------------------------
 * Open a new connection from pool.
 */

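/*
 * A pool that recently failed to connect is "held down": until the
 * holddown deadline passes, VCP_Open() fails immediately with the stored
 * errno instead of retrying.  The holddown period is taken from the
 * backend_local_error_holddown / backend_remote_error_holddown
 * parameters, depending on the errno of the failed attempt.
 */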
int
VCP_Open(struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap, int *err)
{
        int r;
        vtim_mono h;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        AN(err);

        while (cp->holddown > 0) {
                Lck_Lock(&cp->mtx);
                if (cp->holddown == 0) {
                        Lck_Unlock(&cp->mtx);
                        break;
                }

                if (VTIM_mono() >= cp->holddown) {
                        cp->holddown = 0;
                        Lck_Unlock(&cp->mtx);
                        break;
                }

                *err = 0;
                errno = cp->holddown_errno;
                Lck_Unlock(&cp->mtx);
                return (-1);
        }

        *err = errno = 0;
        r = cp->methods->open(cp, tmo, ap);

        if (r >= 0 && errno == 0 && cp->endpoint->preamble != NULL &&
             cp->endpoint->preamble->len > 0) {
                if (write(r, cp->endpoint->preamble->blob,
                    cp->endpoint->preamble->len) !=
                    cp->endpoint->preamble->len) {
                        *err = errno;
                        closefd(&r);
                }
        } else {
                *err = errno;
        }

        if (r >= 0)
                return (r);

        h = 0;

        switch (errno) {
        case EACCES:
        case EPERM:
                h = cache_param->backend_local_error_holddown;
                break;
        case EADDRNOTAVAIL:
                h = cache_param->backend_local_error_holddown;
                break;
        case ECONNREFUSED:
                h = cache_param->backend_remote_error_holddown;
                break;
        case ENETUNREACH:
                h = cache_param->backend_remote_error_holddown;
                break;
        default:
                break;
        }

        if (h == 0)
                return (r);

        Lck_Lock(&cp->mtx);
        h += VTIM_mono();
        if (cp->holddown == 0 || h < cp->holddown) {
                cp->holddown = h;
                cp->holddown_errno = errno;
        }

        Lck_Unlock(&cp->mtx);

        return (r);
}

/*--------------------------------------------------------------------
 * Close a connection.
 */

void
VCP_Close(struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
        cp->n_used--;
        if (pfd->state == PFD_STATE_STOLEN) {
                (void)shutdown(pfd->fd, SHUT_RDWR);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                pfd->state = PFD_STATE_CLEANUP;
                cp->n_kill++;
        } else {
                assert(pfd->state == PFD_STATE_USED);
                cp->methods->close(pfd);
                memset(pfd, 0x44, sizeof *pfd);
                free(pfd);
        }
        Lck_Unlock(&cp->mtx);
}

/*--------------------------------------------------------------------
 * Get a connection, possibly recycled
 */

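/*
 * With force_fresh set, or when no available connection is at the head
 * of the list, a new connection is opened; otherwise the first available
 * pfd is marked PFD_STATE_STOLEN and returned, and the caller is expected
 * to wait for the waiter's hand-over via VCP_Wait() before using it.
 */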
struct pfd *
VCP_Get(struct conn_pool *cp, vtim_dur tmo, struct worker *wrk,
    unsigned force_fresh, int *err)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        AN(err);

        *err = 0;
        Lck_Lock(&cp->mtx);
        pfd = VTAILQ_FIRST(&cp->connlist);
        CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
        if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN) {
                pfd = NULL;
        } else {
                assert(pfd->conn_pool == cp);
                assert(pfd->state == PFD_STATE_AVAIL);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                VTAILQ_INSERT_TAIL(&cp->connlist, pfd, list);
                cp->n_conn--;
                VSC_C_main->backend_reuse++;
                pfd->state = PFD_STATE_STOLEN;
                pfd->cond = &wrk->cond;
        }
        cp->n_used++;                   // Opening mostly works
        Lck_Unlock(&cp->mtx);

        if (pfd != NULL)
                return (pfd);

        ALLOC_OBJ(pfd, PFD_MAGIC);
        AN(pfd);
        INIT_OBJ(pfd->waited, WAITED_MAGIC);
        pfd->state = PFD_STATE_USED;
        pfd->conn_pool = cp;
        pfd->fd = VCP_Open(cp, tmo, &pfd->addr, err);
        if (pfd->fd < 0) {
                FREE_OBJ(pfd);
                Lck_Lock(&cp->mtx);
                cp->n_used--;           // Nope, didn't work after all.
                Lck_Unlock(&cp->mtx);
        } else
                VSC_C_main->backend_conn++;

        return (pfd);
}

/*--------------------------------------------------------------------
 */

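/*
 * Wait for the waiter to hand over a stolen connection, or give up at
 * `when`.  Returns 0 when the pfd is in PFD_STATE_USED and ready, 1 on
 * timeout.
 */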
int
VCP_Wait(struct worker *wrk, struct pfd *pfd, vtim_real when)
{
        struct conn_pool *cp;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        assert(pfd->cond == &wrk->cond);
        Lck_Lock(&cp->mtx);
        while (pfd->state == PFD_STATE_STOLEN) {
                r = Lck_CondWaitUntil(&wrk->cond, &cp->mtx, when);
                if (r != 0) {
                        if (r == EINTR)
                                continue;
                        assert(r == ETIMEDOUT);
                        Lck_Unlock(&cp->mtx);
                        return (1);
                }
        }
        assert(pfd->state == PFD_STATE_USED);
        pfd->cond = NULL;
        Lck_Unlock(&cp->mtx);

        return (0);
}

/*--------------------------------------------------------------------
 */

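/* Return the address this connection was opened to (bogo_ip for UDS). */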
VCL_IP
VCP_GetIp(struct pfd *pfd)
{

        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        return (pfd->addr);
}

/*--------------------------------------------------------------------*/

static void
vcp_panic_endpoint(struct vsb *vsb, const struct vrt_endpoint *vep)
{
        char h[VTCP_ADDRBUFSIZE];
        char p[VTCP_PORTBUFSIZE];

        if (PAN_dump_struct(vsb, vep, VRT_ENDPOINT_MAGIC, "vrt_endpoint"))
                return;
        if (vep->uds_path)
                VSB_printf(vsb, "uds_path = %s,\n", vep->uds_path);
        if (vep->ipv4 && VSA_Sane(vep->ipv4)) {
                VTCP_name(vep->ipv4, h, sizeof h, p, sizeof p);
                VSB_printf(vsb, "ipv4 = %s, ", h);
                VSB_printf(vsb, "port = %s,\n", p);
        }
        if (vep->ipv6 && VSA_Sane(vep->ipv6)) {
                VTCP_name(vep->ipv6, h, sizeof h, p, sizeof p);
                VSB_printf(vsb, "ipv6 = %s, ", h);
                VSB_printf(vsb, "port = %s,\n", p);
        }
        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}

void
VCP_Panic(struct vsb *vsb, struct conn_pool *cp)
{

        if (PAN_dump_struct(vsb, cp, CONN_POOL_MAGIC, "conn_pool"))
                return;
        VSB_cat(vsb, "ident = ");
        VSB_quote(vsb, cp->ident, VSHA256_DIGEST_LENGTH, VSB_QUOTE_HEX);
        VSB_cat(vsb, ",\n");
        vcp_panic_endpoint(vsb, cp->endpoint);
        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}

/*--------------------------------------------------------------------*/

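/* One-time setup of the global pool locks and the VSC_vcp statistics. */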
void
VCP_Init(void)
{
        Lck_New(&conn_pools_mtx, lck_conn_pool);
        Lck_New(&dead_pools_mtx, lck_dead_pool);

        AZ(vsc);
        vsc = VSC_vcp_New(NULL, NULL, "");
        AN(vsc);
}

/**********************************************************************/

static inline int
tmo2msec(vtim_dur tmo)
{
        return ((int)floor(tmo * 1000.0));
}

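/*
 * TCP open: with prefer_ipv6 the IPv6 address is tried first, otherwise
 * IPv4 is tried first and IPv6 only as a fallback.  *ap is left pointing
 * at the address that was attempted last.
 */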
static int v_matchproto_(cp_open_f)
vtp_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        msec = tmo2msec(tmo);
        if (cache_param->prefer_ipv6) {
                *ap = cp->endpoint->ipv6;
                s = VTCP_connect(*ap, msec);
                if (s >= 0)
                        return (s);
        }
        *ap = cp->endpoint->ipv4;
        s = VTCP_connect(*ap, msec);
        if (s >= 0)
                return (s);
        if (!cache_param->prefer_ipv6) {
                *ap = cp->endpoint->ipv6;
                s = VTCP_connect(*ap, msec);
        }
        return (s);
}


/*--------------------------------------------------------------------*/

static void v_matchproto_(cp_close_f)
vtp_close(struct pfd *pfd)
{

        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_close(&pfd->fd);
}

static void v_matchproto_(cp_name_f)
vtp_local_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
               unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_myname(pfd->fd, addr, alen, pbuf, plen);
}

static void v_matchproto_(cp_name_f)
vtp_remote_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
                unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_hisname(pfd->fd, addr, alen, pbuf, plen);
}

static const struct cp_methods vtp_methods = {
        .open = vtp_open,
        .close = vtp_close,
        .local_name = vtp_local_name,
        .remote_name = vtp_remote_name,
};

/*--------------------------------------------------------------------
 */

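/*
 * UDS transport: connect to the endpoint's uds_path.  A UDS has no IP
 * address or port, so bogo_ip and a "0.0.0.0:0" placeholder are reported
 * instead.
 */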
static int v_matchproto_(cp_open_f)
vus_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        AN(cp->endpoint->uds_path);

        msec = tmo2msec(tmo);
        *ap = bogo_ip;
        s = VUS_connect(cp->endpoint->uds_path, msec);
        return (s);
}

static void v_matchproto_(cp_name_f)
vus_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
         unsigned plen)
{
        (void) pfd;
        assert(alen > strlen("0.0.0.0"));
        assert(plen > 1);
        strcpy(addr, "0.0.0.0");
        strcpy(pbuf, "0");
}

static const struct cp_methods vus_methods = {
        .open = vus_open,
        .close = vtp_close,
        .local_name = vus_name,
        .remote_name = vus_name,
};

/*--------------------------------------------------------------------
 * Reference a connection pool given by {ip4, ip6} pair or a UDS.
 * Create it if it doesn't exist already.
 */

struct conn_pool *
VCP_Ref(const struct vrt_endpoint *vep, const char *ident)
{
        struct conn_pool *cp, *cp2;
        struct VSHA256Context cx[1];
        unsigned char digest[VSHA256_DIGEST_LENGTH];

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(ident);
        AN(vsc);

        VSHA256_Init(cx);
        VSHA256_Update(cx, ident, strlen(ident) + 1); // include \0
        if (vep->uds_path != NULL) {
                AZ(vep->ipv4);
                AZ(vep->ipv6);
                VSHA256_Update(cx, "UDS", 4); // include \0
                VSHA256_Update(cx, vep->uds_path, strlen(vep->uds_path));
        } else {
                assert(vep->ipv4 != NULL || vep->ipv6 != NULL);
                if (vep->ipv4 != NULL) {
                        assert(VSA_Sane(vep->ipv4));
                        VSHA256_Update(cx, "IP4", 4); // include \0
                        VSHA256_Update(cx, vep->ipv4, vsa_suckaddr_len);
                }
                if (vep->ipv6 != NULL) {
                        assert(VSA_Sane(vep->ipv6));
                        VSHA256_Update(cx, "IP6", 4); // include \0
                        VSHA256_Update(cx, vep->ipv6, vsa_suckaddr_len);
                }
        }
        if (vep->preamble != NULL && vep->preamble->len > 0) {
                VSHA256_Update(cx, "PRE", 4); // include \0
                VSHA256_Update(cx, vep->preamble->blob, vep->preamble->len);
        }
        VSHA256_Final(digest, cx);

        ALLOC_OBJ(cp, CONN_POOL_MAGIC);
        AN(cp);
        cp->refcnt = 1;
        cp->holddown = 0;
        cp->endpoint = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(cp->endpoint, VRT_ENDPOINT_MAGIC);
        memcpy(cp->ident, digest, sizeof cp->ident);
        if (vep->uds_path != NULL)
                cp->methods = &vus_methods;
        else
                cp->methods = &vtp_methods;
        Lck_New(&cp->mtx, lck_conn_pool);
        VTAILQ_INIT(&cp->connlist);

        Lck_Lock(&conn_pools_mtx);
        cp2 = VRBT_INSERT(vrb, &conn_pools, cp);
        if (cp2 == NULL) {
                vsc->ref_miss++;
                Lck_Unlock(&conn_pools_mtx);
                return (cp);
        }

        CHECK_OBJ(cp2, CONN_POOL_MAGIC);
        assert(cp2->refcnt > 0);
        cp2->refcnt++;
        vsc->ref_hit++;
        Lck_Unlock(&conn_pools_mtx);

        vcp_destroy(&cp);
        return (cp2);
}