varnish-cache/bin/varnishd/cache/cache_tcp_pool.c
/*-
 * Copyright (c) 2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * TCP connection pools.
 *
 */

#include "config.h"

#include <errno.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "vsa.h"
#include "vtcp.h"
#include "vtim.h"
#include "waiter/waiter.h"

#include "cache_tcp_pool.h"
#include "cache_pool.h"

struct tcp_pool {
        unsigned                magic;
#define TCP_POOL_MAGIC          0x28b0e42a

        const void              *id;
        struct suckaddr         *ip4;
        struct suckaddr         *ip6;

        VTAILQ_ENTRY(tcp_pool)  list;
        int                     refcnt;
        struct lock             mtx;

        VTAILQ_HEAD(, vtp)      connlist;
        int                     n_conn;

        VTAILQ_HEAD(, vtp)      killlist;
        int                     n_kill;

        int                     n_used;
};

static struct lock              tcp_pools_mtx;
static VTAILQ_HEAD(, tcp_pool)  tcp_pools = VTAILQ_HEAD_INITIALIZER(tcp_pools);
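
/*
 * Locking overview (descriptive note): tcp_pools_mtx guards the global
 * tcp_pools list and each pool's refcnt; the per-pool tp->mtx guards
 * connlist, killlist and the n_conn/n_kill/n_used counters.
 */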

/*--------------------------------------------------------------------
 * Waiter-handler
 */

static void  v_matchproto_(waiter_handle_f)
tcp_handle(struct waited *w, enum wait_event ev, double now)
{
        struct vtp *vtp;
        struct tcp_pool *tp;

        CAST_OBJ_NOTNULL(vtp, w->priv1, VTP_MAGIC);
        (void)ev;
        (void)now;
        CHECK_OBJ_NOTNULL(vtp->tcp_pool, TCP_POOL_MAGIC);
        tp = vtp->tcp_pool;

        Lck_Lock(&tp->mtx);

        switch (vtp->state) {
        case VTP_STATE_STOLEN:
                vtp->state = VTP_STATE_USED;
                VTAILQ_REMOVE(&tp->connlist, vtp, list);
                AN(vtp->cond);
                AZ(pthread_cond_signal(vtp->cond));
                break;
        case VTP_STATE_AVAIL:
                VTCP_close(&vtp->fd);
                VTAILQ_REMOVE(&tp->connlist, vtp, list);
                tp->n_conn--;
                FREE_OBJ(vtp);
                break;
        case VTP_STATE_CLEANUP:
                VTCP_close(&vtp->fd);
                tp->n_kill--;
                VTAILQ_REMOVE(&tp->killlist, vtp, list);
                memset(vtp, 0x11, sizeof *vtp);
                free(vtp);
                break;
        default:
                WRONG("Wrong vtp state");
        }
        Lck_Unlock(&tp->mtx);
}
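
/*
 * Descriptive note on the states seen above: STOLEN means a worker has
 * claimed the connection via VTP_Get() while it was still parked in the
 * waiter, so the handler hands it over (state USED) and signals the
 * worker's condvar.  AVAIL means the connection was idle in the pool and
 * the waiter reported an event on it (typically a remote close or the
 * backend_idle_timeout), so it is closed and freed.  CLEANUP means
 * VTP_Close() or VTP_Rel() already condemned it; the handler closes the
 * fd and takes it off the killlist.
 */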

/*--------------------------------------------------------------------
 * Reference a TCP pool given by {ip4, ip6} pair.  Create if it
 * doesn't exist already.
 */

struct tcp_pool *
VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6, const void *id)
{
        struct tcp_pool *tp;

        assert(ip4 != NULL || ip6 != NULL);
        Lck_Lock(&tcp_pools_mtx);
        VTAILQ_FOREACH(tp, &tcp_pools, list) {
                assert(tp->refcnt > 0);
                if (tp->id != id)
                        continue;
                if (ip4 == NULL) {
                        if (tp->ip4 != NULL)
                                continue;
                } else {
                        if (tp->ip4 == NULL)
                                continue;
                        if (VSA_Compare(ip4, tp->ip4))
                                continue;
                }
                if (ip6 == NULL) {
                        if (tp->ip6 != NULL)
                                continue;
                } else {
                        if (tp->ip6 == NULL)
                                continue;
                        if (VSA_Compare(ip6, tp->ip6))
                                continue;
                }
                tp->refcnt++;
                Lck_Unlock(&tcp_pools_mtx);
                return (tp);
        }
        Lck_Unlock(&tcp_pools_mtx);

        ALLOC_OBJ(tp, TCP_POOL_MAGIC);
        AN(tp);
        if (ip4 != NULL)
                tp->ip4 = VSA_Clone(ip4);
        if (ip6 != NULL)
                tp->ip6 = VSA_Clone(ip6);
        tp->refcnt = 1;
        tp->id = id;
        Lck_New(&tp->mtx, lck_tcp_pool);
        VTAILQ_INIT(&tp->connlist);
        VTAILQ_INIT(&tp->killlist);

        Lck_Lock(&tcp_pools_mtx);
        VTAILQ_INSERT_HEAD(&tcp_pools, tp, list);
        Lck_Unlock(&tcp_pools_mtx);

        return (tp);
}

/*--------------------------------------------------------------------
 * Add a reference to a tcp_pool
 */

void
VTP_AddRef(struct tcp_pool *tp)
{
        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);

        Lck_Lock(&tcp_pools_mtx);
        assert(tp->refcnt > 0);
        tp->refcnt++;
        Lck_Unlock(&tcp_pools_mtx);
}

/*--------------------------------------------------------------------
 * Release TCP pool, destroy if last reference.
 */

void
VTP_Rel(struct tcp_pool **tpp)
{
        struct tcp_pool *tp;
        struct vtp *vtp, *vtp2;

        TAKE_OBJ_NOTNULL(tp, tpp, TCP_POOL_MAGIC);

        Lck_Lock(&tcp_pools_mtx);
        assert(tp->refcnt > 0);
        if (--tp->refcnt > 0) {
                Lck_Unlock(&tcp_pools_mtx);
                return;
        }
        AZ(tp->n_used);
        VTAILQ_REMOVE(&tcp_pools, tp, list);
        Lck_Unlock(&tcp_pools_mtx);

        free(tp->ip4);
        free(tp->ip6);
        Lck_Lock(&tp->mtx);
        VTAILQ_FOREACH_SAFE(vtp, &tp->connlist, list, vtp2) {
                VTAILQ_REMOVE(&tp->connlist, vtp, list);
                tp->n_conn--;
                assert(vtp->state == VTP_STATE_AVAIL);
                vtp->state = VTP_STATE_CLEANUP;
                (void)shutdown(vtp->fd, SHUT_WR);
                VTAILQ_INSERT_TAIL(&tp->killlist, vtp, list);
                tp->n_kill++;
        }
        while (tp->n_kill) {
                Lck_Unlock(&tp->mtx);
                (void)usleep(20000);
                Lck_Lock(&tp->mtx);
        }
        Lck_Unlock(&tp->mtx);
        Lck_Delete(&tp->mtx);
        AZ(tp->n_conn);
        AZ(tp->n_kill);

        FREE_OBJ(tp);
}
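
/*
 * Note (descriptive): when the last reference goes away, any connections
 * still parked in the waiter are shut down and moved to the killlist;
 * the loop above then polls with usleep() until tcp_handle() has reaped
 * them all, after which the pool itself can be freed.
 */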

/*--------------------------------------------------------------------
 * Open a new connection from pool.  This is a distinct function since
 * probing cannot use a recycled connection.
 */

int
VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);

        msec = (int)floor(tmo * 1000.0);
        if (cache_param->prefer_ipv6) {
                *sa = tp->ip6;
                s = VTCP_connect(tp->ip6, msec);
                if (s >= 0)
                        return (s);
        }
        *sa = tp->ip4;
        s = VTCP_connect(tp->ip4, msec);
        if (s >= 0)
                return (s);
        if (!cache_param->prefer_ipv6) {
                *sa = tp->ip6;
                s = VTCP_connect(tp->ip6, msec);
        }
        return (s);
}
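
/*
 * Address selection above (descriptive note): with prefer_ipv6 set, the
 * IPv6 address is tried first and IPv4 is the fallback; otherwise IPv4
 * is tried first and IPv6 is the fallback.  *sa is always left pointing
 * at the address of the last connect attempt.
 */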

/*--------------------------------------------------------------------
 * Recycle a connection.
 */

void
VTP_Recycle(const struct worker *wrk, struct vtp **vtpp)
{
        struct vtp *vtp;
        struct tcp_pool *tp;
        int i = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        vtp = *vtpp;
        *vtpp = NULL;
        CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
        tp = vtp->tcp_pool;
        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);

        assert(vtp->state == VTP_STATE_USED);
        assert(vtp->fd > 0);

        Lck_Lock(&tp->mtx);
        tp->n_used--;

        vtp->waited->priv1 = vtp;
        vtp->waited->fd = vtp->fd;
        vtp->waited->idle = VTIM_real();
        vtp->state = VTP_STATE_AVAIL;
        vtp->waited->func = tcp_handle;
        vtp->waited->tmo = &cache_param->backend_idle_timeout;
        if (Wait_Enter(wrk->pool->waiter, vtp->waited)) {
                VTCP_close(&vtp->fd);
                memset(vtp, 0x33, sizeof *vtp);
                free(vtp);
                // XXX: stats
                vtp = NULL;
        } else {
                VTAILQ_INSERT_HEAD(&tp->connlist, vtp, list);
                i++;
        }

        if (vtp != NULL)
                tp->n_conn++;
        Lck_Unlock(&tp->mtx);

        if (i && DO_DEBUG(DBG_VTC_MODE)) {
                /*
                 * In varnishtest we do not have the luxury of using
                 * multiple backend connections, so whenever we end up
                 * in the "pending" case, take a short nap to let the
                 * waiter catch up and put the vtp back into circulation.
                 *
                 * In particular ESI:include related tests suffer random
                 * failures without this.
                 *
                 * In normal operation, the only effect is that we will
                 * have N+1 backend connections rather than N, which is
                 * entirely harmless.
                 */
                (void)usleep(10000);
        }
}

/*--------------------------------------------------------------------
 * Close a connection.
 */

void
VTP_Close(struct vtp **vtpp)
{
        struct vtp *vtp;
        struct tcp_pool *tp;

        vtp = *vtpp;
        *vtpp = NULL;
        CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
        tp = vtp->tcp_pool;
        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);

        assert(vtp->fd > 0);

        Lck_Lock(&tp->mtx);
        assert(vtp->state == VTP_STATE_USED || vtp->state == VTP_STATE_STOLEN);
        tp->n_used--;
        if (vtp->state == VTP_STATE_STOLEN) {
                (void)shutdown(vtp->fd, SHUT_RDWR);
                VTAILQ_REMOVE(&tp->connlist, vtp, list);
                vtp->state = VTP_STATE_CLEANUP;
                VTAILQ_INSERT_HEAD(&tp->killlist, vtp, list);
                tp->n_kill++;
        } else {
                assert(vtp->state == VTP_STATE_USED);
                VTCP_close(&vtp->fd);
                memset(vtp, 0x44, sizeof *vtp);
                free(vtp);
        }
        Lck_Unlock(&tp->mtx);
}
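
/*
 * Note (descriptive): a STOLEN connection is still known to the waiter,
 * so VTP_Close() cannot free it outright; it is shut down and parked on
 * the killlist until tcp_handle() sees VTP_STATE_CLEANUP and reaps it.
 * A USED connection has already left the waiter and can be closed and
 * freed immediately.
 */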

/*--------------------------------------------------------------------
 * Get a connection
 */

struct vtp *
VTP_Get(struct tcp_pool *tp, double tmo, struct worker *wrk,
    unsigned force_fresh)
{
        struct vtp *vtp;

        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        Lck_Lock(&tp->mtx);
        vtp = VTAILQ_FIRST(&tp->connlist);
        CHECK_OBJ_ORNULL(vtp, VTP_MAGIC);
        if (force_fresh || vtp == NULL || vtp->state == VTP_STATE_STOLEN)
                vtp = NULL;
        else {
                assert(vtp->tcp_pool == tp);
                assert(vtp->state == VTP_STATE_AVAIL);
                VTAILQ_REMOVE(&tp->connlist, vtp, list);
                VTAILQ_INSERT_TAIL(&tp->connlist, vtp, list);
                tp->n_conn--;
                VSC_C_main->backend_reuse++;
                vtp->state = VTP_STATE_STOLEN;
                vtp->cond = &wrk->cond;
        }
        tp->n_used++;                   // Opening mostly works
        Lck_Unlock(&tp->mtx);

        if (vtp != NULL)
                return (vtp);

        ALLOC_OBJ(vtp, VTP_MAGIC);
        AN(vtp);
        INIT_OBJ(vtp->waited, WAITED_MAGIC);
        vtp->state = VTP_STATE_USED;
        vtp->tcp_pool = tp;
        vtp->fd = VTP_Open(tp, tmo, &vtp->addr);
        if (vtp->fd < 0) {
                FREE_OBJ(vtp);
                Lck_Lock(&tp->mtx);
                tp->n_used--;           // Nope, didn't work after all.
                Lck_Unlock(&tp->mtx);
        } else
                VSC_C_main->backend_conn++;

        return (vtp);
}
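
/*
 * Note (descriptive): a reused connection comes back in state STOLEN and
 * is still being watched by the waiter; VTP_Wait() lets the caller block
 * until tcp_handle() has handed it over (state USED).  A freshly opened
 * connection (or one forced via force_fresh) comes back in state USED;
 * on connect failure the freshly allocated vtp is released again and
 * n_used is rolled back.
 */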

/*--------------------------------------------------------------------
 */

int
VTP_Wait(struct worker *wrk, struct vtp *vtp, double tmo)
{
        struct tcp_pool *tp;
        int r;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
        tp = vtp->tcp_pool;
        CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
        assert(vtp->cond == &wrk->cond);
        Lck_Lock(&tp->mtx);
        while (vtp->state == VTP_STATE_STOLEN) {
                r = Lck_CondWait(&wrk->cond, &tp->mtx, tmo);
                if (r != 0) {
                        if (r == EINTR)
                                continue;
                        assert(r == ETIMEDOUT);
                        Lck_Unlock(&tp->mtx);
                        return (1);
                }
        }
        assert(vtp->state == VTP_STATE_USED);
        vtp->cond = NULL;
        Lck_Unlock(&tp->mtx);

        return (0);
}

/*--------------------------------------------------------------------*/

void
VTP_Init(void)
{
        Lck_New(&tcp_pools_mtx, lck_tcp_pool);
}
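
#if 0
/*
 * Illustrative sketch only, excluded from the build: how a hypothetical
 * caller might drive this API end to end.  example_fetch(), send_request()
 * and read_response(), the wrk/ip4/ip6/id arguments and the timeouts are
 * assumptions made for the example, not part of this file; the real
 * consumer of this API is the backend code.
 */
static void
example_fetch(struct worker *wrk, const struct suckaddr *ip4,
    const struct suckaddr *ip6, const void *id)
{
        struct tcp_pool *tp;
        struct vtp *vtp;

        tp = VTP_Ref(ip4, ip6, id);             /* find or create the pool */
        vtp = VTP_Get(tp, 3.5, wrk, 0);         /* reuse or open a connection */
        if (vtp == NULL) {
                /* could not connect */
        } else {
                send_request(vtp->fd);          /* hypothetical helper */
                if (vtp->state == VTP_STATE_STOLEN &&
                    VTP_Wait(wrk, vtp, VTIM_real() + 0.5)) {
                        /* the waiter never handed the connection over */
                        VTP_Close(&vtp);
                } else {
                        read_response(vtp->fd); /* hypothetical helper */
                        VTP_Recycle(wrk, &vtp); /* park it for reuse */
                }
        }
        VTP_Rel(&tp);                           /* drop the pool reference */
}
#endif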