varnish-cache/bin/varnishd/cache/cache_acceptor.c
1
/*-
2
 * Copyright (c) 2006 Verdens Gang AS
3
 * Copyright (c) 2006-2015 Varnish Software AS
4
 * All rights reserved.
5
 *
6
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * This source file has the various trickery surrounding the accept/listen
30
 * sockets.
31
 *
32
 */
33
34
#include "config.h"
35
36
#include <errno.h>
37
#include <stdlib.h>
38
#include <netinet/in.h>
39
#include <netinet/tcp.h>
40
41
#include "cache_varnishd.h"
42
43
#include "cache_transport.h"
44
#include "cache_pool.h"
45
#include "common/heritage.h"
46
47
#include "vcli_serve.h"
48
#include "vsa.h"
49
#include "vtcp.h"
50
#include "vtim.h"
51
52
/* Thread running vca_acct(); created by the "start" CLI command (ccf_start) */
static pthread_t        VCA_thread;
/* Current accept-pacing delay in seconds; 0.0 means no pacing in effect */
static double vca_pace = 0.0;
/* Protects vca_pace */
static struct lock pace_mtx;
/* Set by vca_acct() once listen(2) has been called on all sockets;
 * accept tasks and the debug.listen_address CLI command wait for it */
static unsigned pool_accepting;
/* Serializes walks of heritage.socks against VCA_Shutdown() closing them */
static pthread_mutex_t shut_mtx = PTHREAD_MUTEX_INITIALIZER;
57
58
/*
 * Per-connection scratch data, handed from the accept task to the worker
 * thread which turns the accepted socket into a session (vca_make_session).
 */
struct wrk_accept {
	unsigned		magic;
#define WRK_ACCEPT_MAGIC	0x8c4b4d59

	/* Accept stuff */
	struct sockaddr_storage	acceptaddr;	/* peer address from accept(2) */
	socklen_t		acceptaddrlen;	/* length of acceptaddr */
	int			acceptsock;	/* the accepted file descriptor */
	struct listen_sock	*acceptlsock;	/* listen socket it arrived on */
};

/*
 * One accept task: binds a listen socket to the thread pool whose
 * workers accept connections from it (see VCA_NewPool / vca_accept_task).
 */
struct poolsock {
	unsigned			magic;
#define POOLSOCK_MAGIC			0x1b0a2d38
	VTAILQ_ENTRY(poolsock)		list;	/* on pool->poolsocks */
	struct listen_sock		*lsock;
	struct pool_task		task;	/* runs vca_accept_task */
	struct pool			*pool;
};
77
78
/*--------------------------------------------------------------------
 * TCP options we want to control
 */

static struct tcp_opt {
	int		level;		/* protocol level for {get,set}sockopt */
	int		optname;	/* option number at that level */
	const char	*strname;	/* stringified name, matched by strcmp
					 * in vca_tcp_opt_init */
	socklen_t	sz;		/* size of the option value */
	void		*ptr;		/* desired value, allocated lazily */
	int		need;		/* must be set per accepted socket */
} tcp_opts[] = {
#define TCPO(lvl, nam, sz) { lvl, nam, #nam, sizeof(sz), 0, 0},

	TCPO(SOL_SOCKET, SO_LINGER, struct linger)
	TCPO(SOL_SOCKET, SO_KEEPALIVE, int)
	TCPO(IPPROTO_TCP, TCP_NODELAY, int)

#ifdef SO_SNDTIMEO_WORKS
	TCPO(SOL_SOCKET, SO_SNDTIMEO, struct timeval)
#endif

#ifdef SO_RCVTIMEO_WORKS
	TCPO(SOL_SOCKET, SO_RCVTIMEO, struct timeval)
#endif

#ifdef HAVE_TCP_KEEP
	TCPO(IPPROTO_TCP, TCP_KEEPIDLE, int)
	TCPO(IPPROTO_TCP, TCP_KEEPCNT, int)
	TCPO(IPPROTO_TCP, TCP_KEEPINTVL, int)
#endif

#undef TCPO
};

/* Number of entries in tcp_opts[] */
static const int n_tcp_opts = sizeof tcp_opts / sizeof tcp_opts[0];
114
115
/*--------------------------------------------------------------------
 * We want to get out of any kind of trouble-hit TCP connections as fast
 * as absolutely possible, so we leave SO_LINGER disabled (l_onoff = 0),
 * so that even if there are outstanding write data on the socket, a
 * close(2) will return immediately.
 */
static const struct linger linger = {
	.l_onoff	=	0,
};
124
125
/*
 * We turn on keepalives by default to assist in detecting clients that have
 * hung up on connections returning from waitinglists
 */

/* When set, the next accepted socket is probed (vca_tcp_opt_test) to see
 * which options were inherited from the listen socket */
static unsigned		need_test;
131
132
/*--------------------------------------------------------------------
133
 * Some kernels have bugs/limitations with respect to which options are
134
 * inherited from the accept/listen socket, so we have to keep track of
135
 * which, if any, sockopts we have to set on the accepted socket.
136
 */
137
138
static int
139 1949
vca_tcp_opt_init(void)
140
{
141
        int n;
142 1949
        int one = 1;
143
        struct tcp_opt *to;
144
        struct timeval tv;
145 1949
        int chg = 0;
146
        int x;
147
148 1949
        memset(&tv, 0, sizeof tv);
149 1949
        memset(&x, 0, sizeof x);
150
151 17541
        for (n = 0; n < n_tcp_opts; n++) {
152 15592
                to = &tcp_opts[n];
153 15592
                if (to->ptr == NULL)
154 4904
                        to->ptr = calloc(1, to->sz);
155 15592
                AN(to->ptr);
156 15592
                if (!strcmp(to->strname, "SO_LINGER")) {
157 1949
                        assert(to->sz == sizeof linger);
158 1949
                        memcpy(to->ptr, &linger, sizeof linger);
159 1949
                        to->need = 1;
160 13643
                } else if (!strcmp(to->strname, "TCP_NODELAY")) {
161 1949
                        assert(to->sz == sizeof one);
162 1949
                        memcpy(to->ptr, &one, sizeof one);
163 1949
                        to->need = 1;
164 11694
                } else if (!strcmp(to->strname, "SO_KEEPALIVE")) {
165 1949
                        assert(to->sz == sizeof one);
166 1949
                        memcpy(to->ptr, &one, sizeof one);
167 1949
                        to->need = 1;
168
#define NEW_VAL(to, xx)                                         \
169
        do {                                                    \
170
                assert(to->sz == sizeof xx);                    \
171
                if (memcmp(to->ptr, &(xx), sizeof xx)) {        \
172
                        memcpy(to->ptr, &(xx), sizeof xx);      \
173
                        to->need = 1;                           \
174
                        chg = 1;                                \
175
                        need_test = 1;                          \
176
                }                                               \
177
        } while (0)
178
179
#ifdef SO_SNDTIMEO_WORKS
180 9745
                } else if (!strcmp(to->strname, "SO_SNDTIMEO")) {
181 1949
                        tv = VTIM_timeval(cache_param->idle_send_timeout);
182 1949
                        NEW_VAL(to, tv);
183
#endif
184
#ifdef SO_RCVTIMEO_WORKS
185 7796
                } else if (!strcmp(to->strname, "SO_RCVTIMEO")) {
186 1949
                        tv = VTIM_timeval(cache_param->timeout_idle);
187 1949
                        NEW_VAL(to, tv);
188
#endif
189
#ifdef HAVE_TCP_KEEP
190 5847
                } else if (!strcmp(to->strname, "TCP_KEEPIDLE")) {
191 1949
                        x = (int)(cache_param->tcp_keepalive_time);
192 1949
                        NEW_VAL(to, x);
193 3898
                } else if (!strcmp(to->strname, "TCP_KEEPCNT")) {
194 1949
                        x = (int)(cache_param->tcp_keepalive_probes);
195 1949
                        NEW_VAL(to, x);
196 1949
                } else if (!strcmp(to->strname, "TCP_KEEPINTVL")) {
197 1949
                        x = (int)(cache_param->tcp_keepalive_intvl);
198 1949
                        NEW_VAL(to, x);
199
#endif
200
                }
201
        }
202 1949
        return (chg);
203
}
204
205
static void
206 580
vca_tcp_opt_test(int sock)
207
{
208
        int i, n;
209
        struct tcp_opt *to;
210
        socklen_t l;
211
        void *ptr;
212
213 5211
        for (n = 0; n < n_tcp_opts; n++) {
214 4632
                to = &tcp_opts[n];
215 4632
                to->need = 1;
216 4632
                ptr = calloc(1, to->sz);
217 4632
                AN(ptr);
218 4632
                l = to->sz;
219 4632
                i = getsockopt(sock, to->level, to->optname, ptr, &l);
220 4631
                if (i == 0 && !memcmp(ptr, to->ptr, to->sz))
221 3474
                        to->need = 0;
222 4631
                free(ptr);
223 4631
                if (i && errno != ENOPROTOOPT)
224 0
                        VTCP_Assert(i);
225
        }
226 579
}
227
228
static void
229 1707
vca_tcp_opt_set(int sock, int force)
230
{
231
        int n;
232
        struct tcp_opt *to;
233
234 15363
        for (n = 0; n < n_tcp_opts; n++) {
235 13656
                to = &tcp_opts[n];
236 13656
                if (to->need || force) {
237 7377
                        VTCP_Assert(setsockopt(sock,
238
                            to->level, to->optname, to->ptr, to->sz));
239
                }
240
        }
241 1707
}
242
243
/*--------------------------------------------------------------------
 * If accept(2)'ing fails, we pace ourselves to relieve any resource
 * shortage if possible.
 */
247
248
/* Sleep for the current pace delay, if any, before the next accept(2). */
static void
vca_pace_check(void)
{
	double p;

	/* Unlocked fast-path read; avoids the mutex in the common case */
	if (vca_pace == 0.0)
		return;
	Lck_Lock(&pace_mtx);
	p = vca_pace;
	Lck_Unlock(&pace_mtx);
	if (p > 0.0)
		VTIM_sleep(p);
}
261
262
/* An accept(2) failed: increase the pace delay, capped at
 * acceptor_sleep_max. */
static void
vca_pace_bad(void)
{

	Lck_Lock(&pace_mtx);
	vca_pace += cache_param->acceptor_sleep_incr;
	if (vca_pace > cache_param->acceptor_sleep_max)
		vca_pace = cache_param->acceptor_sleep_max;
	Lck_Unlock(&pace_mtx);
}
272
273
/* An accept(2) succeeded: decay the pace delay, snapping to zero once it
 * drops below one increment. */
static void
vca_pace_good(void)
{

	/* Unlocked fast-path read; avoids the mutex in the common case */
	if (vca_pace == 0.0)
		return;
	Lck_Lock(&pace_mtx);
	vca_pace *= cache_param->acceptor_sleep_decay;
	if (vca_pace < cache_param->acceptor_sleep_incr)
		vca_pace = 0.0;
	Lck_Unlock(&pace_mtx);
}
285
286
/*--------------------------------------------------------------------
287
 * The pool-task for a newly accepted session
288
 *
289
 * Called from assigned worker thread
290
 */
291
292
/*
 * Turn an accepted socket (described by the struct wrk_accept in *arg)
 * into a session and hand it to the listen socket's transport.
 * Runs as a pool task on the worker thread the connection was assigned to.
 */
static void v_matchproto_(task_func_t)
vca_make_session(struct worker *wrk, void *arg)
{
	struct sess *sp;
	struct req *req;
	struct wrk_accept *wa;
	struct sockaddr_storage ss;
	struct suckaddr *sa;
	socklen_t sl;
	char laddr[VTCP_ADDRBUFSIZE];
	char lport[VTCP_PORTBUFSIZE];
	char raddr[VTCP_ADDRBUFSIZE];
	char rport[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(wa, arg, WRK_ACCEPT_MAGIC);

	/* Put the socket in blocking mode; if that fails the fd is
	 * unusable and we drop the connection on the floor. */
	if (VTCP_blocking(wa->acceptsock)) {
		closefd(&wa->acceptsock);
		wrk->stats->sess_drop++;	// XXX Better counter ?
		WS_Release(wrk->aws, 0);
		return;
	}

	/* Turn accepted socket into a session */
	AN(wrk->aws->r);
	sp = SES_New(wrk->pool);
	if (sp == NULL) {
		/*
		 * We consider this a DoS situation and silently close the
		 * connection with minimum effort and fuzz, rather than try
		 * to send an intelligent message back.
		 */
		vca_pace_bad();
		(void)VTCP_nonblocking(wa->acceptsock);
		closefd(&wa->acceptsock);
		wrk->stats->sess_drop++;
		WS_Release(wrk->aws, 0);
		return;
	}
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	wrk->stats->s_sess++;

	sp->t_open = VTIM_real();
	sp->t_idle = sp->t_open;
	sp->vxid = VXID_Get(wrk, VSL_CLIENTMARKER);

	/* The session now owns the fd */
	sp->fd = wa->acceptsock;
	wa->acceptsock = -1;

	/* Record the remote (client) end of the connection */
	assert(wa->acceptaddrlen <= vsa_suckaddr_len);
	SES_Reserve_remote_addr(sp, &sa);
	AN(VSA_Build(sa, &wa->acceptaddr, wa->acceptaddrlen));
	sp->sattr[SA_CLIENT_ADDR] = sp->sattr[SA_REMOTE_ADDR];

	VTCP_name(sa, raddr, sizeof raddr, rport, sizeof rport);
	SES_Set_String_Attr(sp, SA_CLIENT_IP, raddr);
	SES_Set_String_Attr(sp, SA_CLIENT_PORT, rport);

	/* Record the local (server) end of the connection */
	sl = sizeof ss;
	AZ(getsockname(sp->fd, (void*)&ss, &sl));
	SES_Reserve_local_addr(sp, &sa);
	AN(VSA_Build(sa, &ss, sl));
	sp->sattr[SA_SERVER_ADDR] = sp->sattr[SA_LOCAL_ADDR];

	VTCP_name(sa, laddr, sizeof laddr, lport, sizeof lport);

	/* Log session open */
	VSL(SLT_Begin, sp->vxid, "sess 0 %s",
	    wa->acceptlsock->transport->name);
	VSL(SLT_SessOpen, sp->vxid, "%s %s %s %s %s %.6f %d",
	    raddr, rport,
	    wa->acceptlsock->name != NULL ?
		wa->acceptlsock->name : wa->acceptlsock->endpoint,
	    laddr, lport,
	    sp->t_open, sp->fd);

	/* We are done with the wrk_accept workspace copy */
	WS_Release(wrk->aws, 0);

	vca_pace_good();
	wrk->stats->sess_conn++;

	/* First accepted socket after an option change: probe which
	 * options were inherited from the listen socket. */
	if (need_test) {
		vca_tcp_opt_test(sp->fd);
		need_test = 0;
	}
	vca_tcp_opt_set(sp->fd, 0);

	req = Req_New(wrk, sp);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	req->htc->rfd = &sp->fd;

	/* Hand the session to the listen socket's transport protocol */
	SES_SetTransport(wrk, sp, req, wa->acceptlsock->transport);
}
385
386
/*--------------------------------------------------------------------
387
 * This function accepts on a single socket for a single thread pool.
388
 *
389
 * As long as we can stick the accepted connection to another thread
390
 * we do so, otherwise we put the socket back on the "BACK" pool
391
 * and handle the new connection ourselves.
392
 */
393
394
/*
 * Accept connections on one listen socket for one thread pool.
 *
 * As long as we can stick the accepted connection to another thread we
 * keep looping; otherwise we reschedule this listening task and handle
 * the new connection in this worker thread (see Pool_Task_Arg).
 */
static void v_matchproto_(task_func_t)
vca_accept_task(struct worker *wrk, void *arg)
{
	struct wrk_accept wa;
	struct poolsock *ps;
	struct listen_sock *ls;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(ps, arg, POOLSOCK_MAGIC);
	ls = ps->lsock;
	CHECK_OBJ_NOTNULL(ls, LISTEN_SOCK_MAGIC);

	/* Wait until vca_acct() has called listen(2) on all sockets */
	while (!pool_accepting)
		VTIM_sleep(.1);

	while (!ps->pool->die) {
		INIT_OBJ(&wa, WRK_ACCEPT_MAGIC);
		wa.acceptlsock = ls;

		/* Throttle if earlier accepts failed */
		vca_pace_check();

		wa.acceptaddrlen = sizeof wa.acceptaddr;
		do {
			i = accept(ls->sock, (void*)&wa.acceptaddr,
				   &wa.acceptaddrlen);
		} while (i < 0 && errno == EAGAIN);

		/* Pool is being torn down: this task owns ps, free it */
		if (i < 0 && ps->pool->die) {
			VSL(SLT_Debug, 0, "XXX Accept thread dies %p", ps);
			FREE_OBJ(ps);
			return;
		}

		/* ls->sock == -2 is VCA_Shutdown's closed-socket marker */
		if (i < 0 && ls->sock == -2) {
			/* Shut down in progress */
			sleep(2);
			continue;
		}

		if (i < 0) {
			switch (errno) {
			case ECONNABORTED:
				/* Peer gave up; harmless, no pacing */
				break;
			case EMFILE:
				VSL(SLT_Debug, ls->sock, "Too many open files");
				vca_pace_bad();
				break;
			case EBADF:
				VSL(SLT_Debug, ls->sock, "Accept failed: %s",
				    strerror(errno));
				vca_pace_bad();
				break;
			default:
				VSL(SLT_Debug, ls->sock, "Accept failed: %s",
				    strerror(errno));
				vca_pace_bad();
				break;
			}
			wrk->stats->sess_fail++;
			(void)Pool_TrySumstat(wrk);
			continue;
		}

		wa.acceptsock = i;

		if (!Pool_Task_Arg(wrk, TASK_QUEUE_VCA,
		    vca_make_session, &wa, sizeof wa)) {
			/*
			 * We couldn't get another thread, so we will handle
			 * the request in this worker thread, but first we
			 * must reschedule the listening task so it will be
			 * taken up by another thread again.
			 */
			if (!ps->pool->die)
				AZ(Pool_Task(wrk->pool, &ps->task,
				    TASK_QUEUE_VCA));
			return;
		}
		/* Debug aid: slow down the accept loop on request */
		if (!ps->pool->die && DO_DEBUG(DBG_SLOW_ACCEPTOR))
			VTIM_sleep(2.0);

		/*
		 * We were able to hand off, so release this threads VCL
		 * reference (if any) so we don't hold on to discarded VCLs.
		 */
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
	}
}
484
485
/*--------------------------------------------------------------------
486
 * Called when a worker and attached thread pool is created, to
487
 * allocate the tasks which will listen to sockets for that pool.
488
 */
489
490
void
491 1221
VCA_NewPool(struct pool *pp)
492
{
493
        struct listen_sock *ls;
494
        struct poolsock *ps;
495
496 2442
        VTAILQ_FOREACH(ls, &heritage.socks, list) {
497 1221
                ALLOC_OBJ(ps, POOLSOCK_MAGIC);
498 1221
                AN(ps);
499 1221
                ps->lsock = ls;
500 1221
                ps->task.func = vca_accept_task;
501 1221
                ps->task.priv = ps;
502 1221
                ps->pool = pp;
503 1221
                VTAILQ_INSERT_TAIL(&pp->poolsocks, ps, list);
504 1221
                AZ(Pool_Task(pp, &ps->task, TASK_QUEUE_VCA));
505
        }
506 1221
}
507
508
void
509 2
VCA_DestroyPool(struct pool *pp)
510
{
511
        struct poolsock *ps;
512
513 6
        while (!VTAILQ_EMPTY(&pp->poolsocks)) {
514 2
                ps = VTAILQ_FIRST(&pp->poolsocks);
515 2
                VTAILQ_REMOVE(&pp->poolsocks, ps, list);
516
        }
517 2
}
518
519
/*--------------------------------------------------------------------*/
520
521
/*
 * Acceptor "nanny" thread: calls listen(2) on all heritage sockets,
 * applies TCP options, then loops forever re-applying options whenever
 * the relevant parameters change, and maintaining the uptime counter.
 */
static void * v_matchproto_()
vca_acct(void *arg)
{
	struct listen_sock *ls;
	double t0, now;

	// XXX Actually a mis-nomer now because the accept happens in a pool
	// thread. Rename to accept-nanny or so?
	THR_SetName("cache-acceptor");
	THR_Init();
	(void)arg;

	/* Compute initial desired option values */
	(void)vca_tcp_opt_init();

	AZ(pthread_mutex_lock(&shut_mtx));
	VTAILQ_FOREACH(ls, &heritage.socks, list) {
		CHECK_OBJ_NOTNULL(ls->transport, TRANSPORT_MAGIC);
		if (ls->sock == -2)
			continue;	// VCA_Shutdown
		assert (ls->sock > 0);	// We know where stdin is
		if (cache_param->tcp_fastopen) {
			int i;
			i = VTCP_fastopen(ls->sock, cache_param->listen_depth);
			if (i)
				VSL(SLT_Error, ls->sock,
				    "Kernel TCP Fast Open: sock=%d, ret=%d %s",
				    ls->sock, i, strerror(errno));
		}
		AZ(listen(ls->sock, cache_param->listen_depth));
		/* force = 1: set every option on the listen socket */
		vca_tcp_opt_set(ls->sock, 1);
		if (cache_param->accept_filter) {
			int i;
			i = VTCP_filter_http(ls->sock);
			if (i)
				VSL(SLT_Error, ls->sock,
				    "Kernel filtering: sock=%d, ret=%d %s",
				    ls->sock, i, strerror(errno));
		}
	}
	AZ(pthread_mutex_unlock(&shut_mtx));

	/* Unblock the accept tasks and ccf_listen_address */
	need_test = 1;
	pool_accepting = 1;

	t0 = VTIM_real();
	while (1) {
		(void)sleep(1);
		/* Re-apply options to all listen sockets on param change */
		if (vca_tcp_opt_init()) {
			AZ(pthread_mutex_lock(&shut_mtx));
			VTAILQ_FOREACH(ls, &heritage.socks, list) {
				if (ls->sock == -2)
					continue;	// VCA_Shutdown
				assert (ls->sock > 0);
				vca_tcp_opt_set(ls->sock, 1);
			}
			AZ(pthread_mutex_unlock(&shut_mtx));
		}
		now = VTIM_real();
		VSC_C_main->uptime = (uint64_t)(now - t0);
	}
	NEEDLESS(return NULL);
}
583
584
/*--------------------------------------------------------------------*/
585
586
/* CLI "start": spin up the acceptor-nanny thread (vca_acct), which will
 * call listen(2) on all sockets and set pool_accepting. */
static void v_matchproto_(cli_func_t)
ccf_start(struct cli *cli, const char * const *av, void *priv)
{

	(void)cli;
	(void)av;
	(void)priv;

	AZ(pthread_create(&VCA_thread, NULL, vca_acct, NULL));
}
596
597
/*--------------------------------------------------------------------*/
598
599
static void v_matchproto_(cli_func_t)
600 1217
ccf_listen_address(struct cli *cli, const char * const *av, void *priv)
601
{
602
        struct listen_sock *ls;
603
        char h[32], p[32];
604
605
        (void)cli;
606
        (void)av;
607
        (void)priv;
608
609
        /*
610
         * This CLI command is primarily used by varnishtest.  Don't
611
         * respond until listen(2) has been called, in order to avoid
612
         * a race where varnishtest::client would attempt to connect(2)
613
         * before listen(2) has been called.
614
         */
615 2434
        while (!pool_accepting)
616 0
                VTIM_sleep(.1);
617
618 1217
        AZ(pthread_mutex_lock(&shut_mtx));
619 2434
        VTAILQ_FOREACH(ls, &heritage.socks, list) {
620 1217
                VTCP_myname(ls->sock, h, sizeof h, p, sizeof p);
621 1217
                VCLI_Out(cli, "%s %s\n", h, p);
622
        }
623 1217
        AZ(pthread_mutex_unlock(&shut_mtx));
624 1217
}
625
626
/*--------------------------------------------------------------------*/
627
628
/* CLI commands provided by the acceptor */
static struct cli_proto vca_cmds[] = {
	{ CLICMD_SERVER_START,		"", ccf_start },
	{ CLICMD_DEBUG_LISTEN_ADDRESS,	"d", ccf_listen_address },
	{ NULL }
};
633
634
/* Register the acceptor's CLI commands and create the pacing mutex. */
void
VCA_Init(void)
{

	CLI_AddFuncs(vca_cmds);
	Lck_New(&pace_mtx, lck_vcapace);
}
641
642
void
643 609
VCA_Shutdown(void)
644
{
645
        struct listen_sock *ls;
646
        int i;
647
648 609
        AZ(pthread_mutex_lock(&shut_mtx));
649 1218
        VTAILQ_FOREACH(ls, &heritage.socks, list) {
650 609
                i = ls->sock;
651 609
                ls->sock = -2;
652 609
                (void)close(i);
653
        }
654 609
        AZ(pthread_mutex_unlock(&shut_mtx));
655 609
}
656
657
/*--------------------------------------------------------------------
658
 * Transport protocol registration
659
 *
660
 */
661
662
/* Registry of known transport protocols, populated by XPORT_Init() */
static VTAILQ_HEAD(,transport)	transports =
    VTAILQ_HEAD_INITIALIZER(transports);
664
665
void
666 662
XPORT_Init(void)
667
{
668
        uint16_t n;
669
        struct transport *xp;
670
671 662
        ASSERT_MGT();
672
673 662
        VTAILQ_INSERT_TAIL(&transports, &PROXY_transport, list);
674 662
        VTAILQ_INSERT_TAIL(&transports, &HTTP1_transport, list);
675 662
        VTAILQ_INSERT_TAIL(&transports, &H2_transport, list);
676
677 662
        n = 0;
678 2648
        VTAILQ_FOREACH(xp, &transports, list)
679 1986
                xp->number = ++n;
680 662
}
681
682
const struct transport *
683 643
XPORT_Find(const char *name)
684
{
685
        const struct transport *xp;
686
687 643
        ASSERT_MGT();
688
689 1279
        VTAILQ_FOREACH(xp, &transports, list)
690 2555
                if (xp->proto_ident != NULL &&
691 1277
                    !strcasecmp(xp->proto_ident, name))
692 642
                        return (xp);
693 1
        return (NULL);
694
}
695
696
const struct transport *
697 3
XPORT_ByNumber(uint16_t no)
698
{
699
        const struct transport *xp;
700
701 7
        VTAILQ_FOREACH(xp, &transports, list)
702 7
                if (xp->number == no)
703 3
                        return (xp);
704 0
        return (NULL);
705
}