varnish-cache/bin/varnishd/http1/cache_http1_line.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2011 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 *
30
 * Write data to fd
31
 * We try to use writev() if possible in order to minimize number of
32
 * syscalls made and packets sent.  It also just might allow the worker
33
 * thread to complete the request without holding stuff locked.
34
 *
35
 * XXX: chunked header (generated in Flush) and Tail (EndChunk)
36
 *      are not accounted by means of the size_t returned. Obvious ideas:
37
 *      - add size_t return value to Flush and EndChunk
38
 *      - base accounting on (struct v1l).cnt
39
 */
40
41
#include "config.h"
42
43
#include <sys/uio.h>
44
#include "cache/cache_varnishd.h"
45
#include "cache/cache_filter.h"
46
47
#include <stdio.h>
48
49
#include "cache_http1.h"
50
#include "vtim.h"
51
52
/*--------------------------------------------------------------------*/
53
54
/*
 * State for assembling an iovec array on a workspace reservation and
 * writev()ing it to a file descriptor, with optional HTTP/1 chunked
 * framing.  Allocated and released via V1L_Open()/V1L_Close().
 */
struct v1l {
	unsigned		magic;
#define V1L_MAGIC		0x2f2142e5
	int			*wfd;	/* pointer to the fd; < 0 means closed */
	stream_close_t		werr;	/* valid after V1L_Flush() */
	struct iovec		*iov;	/* workspace-reserved iovec array */
	int			siov;	/* capacity of iov[] (decremented while chunking) */
	int			niov;	/* iov[] entries currently in use */
	size_t			liov;	/* total bytes pending in iov[] */
	size_t			cliov;	/* bytes in the current chunk (since V1L_Chunked) */
	int			ciov;	/* Chunked header marker; == siov when not chunking */
	vtim_real		deadline;	/* absolute send deadline checked in V1L_Flush() */
	struct vsl_log		*vsl;
	uint64_t		cnt;	/* Flushed byte count */
	struct ws		*ws;	/* workspace, rolled back in V1L_Close() */
	uintptr_t		ws_snap;	/* snapshot for that rollback */
	void			**vdp_priv;	/* VDP priv slot cleared in V1L_Close() */
};
72
73
/*--------------------------------------------------------------------
74
 * for niov == 0, reserve the ws for max number of iovs
75
 * otherwise, up to niov
76
 */
77
78
struct v1l *
79 203912
V1L_Open(struct ws *ws, int *fd, struct vsl_log *vsl,
80
    vtim_real deadline, unsigned niov)
81
{
82
        struct v1l *v1l;
83
        unsigned u;
84
        uintptr_t ws_snap;
85
        size_t sz;
86
87 203912
        if (WS_Overflowed(ws))
88 0
                return (NULL);
89
90 203912
        if (niov != 0)
91 120370
                assert(niov >= 3);
92
93 203912
        ws_snap = WS_Snapshot(ws);
94
95 203912
        v1l = WS_Alloc(ws, sizeof *v1l);
96 203912
        if (v1l == NULL)
97 40
                return (NULL);
98 203872
        INIT_OBJ(v1l, V1L_MAGIC);
99
100 203872
        v1l->ws = ws;
101 203872
        v1l->ws_snap = ws_snap;
102
103 203872
        u = WS_ReserveLumps(ws, sizeof(struct iovec));
104 203872
        if (u < 3) {
105
                /* Must have at least 3 in case of chunked encoding */
106 0
                WS_Release(ws, 0);
107 0
                WS_MarkOverflow(ws);
108 0
                return (NULL);
109
        }
110 203872
        if (u > IOV_MAX)
111 1280
                u = IOV_MAX;
112 203872
        if (niov != 0 && u > niov)
113 117328
                u = niov;
114 203872
        v1l->iov = WS_Reservation(ws);
115 203872
        v1l->siov = (int)u;
116 203872
        v1l->ciov = (int)u;
117 203872
        v1l->wfd = fd;
118 203872
        v1l->deadline = deadline;
119 203872
        v1l->vsl = vsl;
120 203872
        v1l->werr = SC_NULL;
121
122 203872
        sz = u * sizeof(struct iovec);
123 203872
        assert(sz < UINT_MAX);
124 203872
        WS_Release(ws, (unsigned)sz);
125 203872
        return (v1l);
126 203912
}
127
128
stream_close_t
129 203893
V1L_Close(struct v1l **v1lp, uint64_t *cnt)
130
{
131
        struct v1l *v1l;
132
        struct ws *ws;
133
        uintptr_t ws_snap;
134
        stream_close_t sc;
135
136 203893
        AN(cnt);
137 203893
        TAKE_OBJ_NOTNULL(v1l, v1lp, V1L_MAGIC);
138 203893
        if (v1l->vdp_priv != NULL) {
139 160031
                assert(*v1l->vdp_priv == v1l);
140 160031
                *v1l->vdp_priv = NULL;
141 160031
        }
142 203893
        sc = V1L_Flush(v1l);
143 203893
        *cnt = v1l->cnt;
144 203893
        ws = v1l->ws;
145 203893
        ws_snap = v1l->ws_snap;
146 203893
        ZERO_OBJ(v1l, sizeof *v1l);
147 203893
        WS_Rollback(ws, ws_snap);
148 203893
        return (sc);
149
}
150
151
static void
152 768
v1l_prune(struct v1l *v1l, ssize_t abytes)
153
{
154 768
        size_t used = 0;
155
        size_t sz, bytes, used_here;
156
        int j;
157
158 768
        assert(abytes > 0);
159 768
        bytes = (size_t)abytes;
160
161 2824
        for (j = 0; j < v1l->niov; j++) {
162 2824
                if (used + v1l->iov[j].iov_len > bytes) {
163
                        /* Cutoff is in this iov */
164 768
                        used_here = bytes - used;
165 768
                        v1l->iov[j].iov_len -= used_here;
166 768
                        v1l->iov[j].iov_base =
167 768
                            (char*)v1l->iov[j].iov_base + used_here;
168 768
                        sz = (unsigned)v1l->niov - (unsigned)j;
169 768
                        sz *= sizeof(struct iovec);
170 768
                        memmove(v1l->iov, &v1l->iov[j], sz);
171 768
                        v1l->niov -= j;
172 768
                        assert(v1l->liov >= bytes);
173 768
                        v1l->liov -= bytes;
174 768
                        return;
175
                }
176 2056
                used += v1l->iov[j].iov_len;
177 2056
        }
178 0
        AZ(v1l->liov);
179 768
}
180
181
/*
 * Write all pending iovecs to the fd with writev(), retrying on partial
 * writes and idle (EWOULDBLOCK) timeouts until the total deadline passes.
 * In chunked mode, the reserved iov slot (ciov) is filled with the chunk
 * size header and a CRLF tail iov is appended before writing.
 * On error, records SC_REM_CLOSE (EPIPE) or SC_TX_ERROR in werr; the
 * pending iov state is reset either way.  Returns werr.
 *
 * NOTE: cbuf is a stack buffer, so the chunk header iovs it backs are
 * only valid for the duration of this call — they are consumed by the
 * writev() loop below and reset before return.
 */
stream_close_t
V1L_Flush(struct v1l *v1l)
{
	ssize_t i;
	size_t sz;
	int err;
	char cbuf[32];

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
	CHECK_OBJ_NOTNULL(v1l->werr, STREAM_CLOSE_MAGIC);
	AN(v1l->wfd);

	assert(v1l->niov <= v1l->siov);

	/* only write if the fd is open, data is pending and no prior error */
	if (*v1l->wfd >= 0 && v1l->liov > 0 && v1l->werr == SC_NULL) {
		if (v1l->ciov < v1l->siov && v1l->cliov > 0) {
			/* Add chunk head & tail */
			bprintf(cbuf, "00%zx\r\n", v1l->cliov);
			sz = strlen(cbuf);
			v1l->iov[v1l->ciov].iov_base = cbuf;
			v1l->iov[v1l->ciov].iov_len = sz;
			v1l->liov += sz;

			/* This is OK, because siov was --'ed */
			v1l->iov[v1l->niov].iov_base = cbuf + sz - 2;
			v1l->iov[v1l->niov++].iov_len = 2;
			v1l->liov += 2;
		} else if (v1l->ciov < v1l->siov) {
			/* chunked mode but empty chunk: blank the header slot */
			v1l->iov[v1l->ciov].iov_base = cbuf;
			v1l->iov[v1l->ciov].iov_len = 0;
		}

		i = 0;
		err = 0;
		do {
			/* enforce the total send deadline across retries */
			if (VTIM_real() > v1l->deadline) {
				VSLb(v1l->vsl, SLT_Debug,
				    "Hit total send timeout, "
				    "wrote = %zd/%zd; not retrying",
				    i, v1l->liov);
				i = -1;
				break;
			}

			i = writev(*v1l->wfd, v1l->iov, v1l->niov);
			if (i > 0) {
				v1l->cnt += (size_t)i;
				/* everything went out: done */
				if ((size_t)i == v1l->liov)
					break;
			}

			/* we hit a timeout, and some data may have been sent:
			 * Remove sent data from start of I/O vector, then retry
			 *
			 * XXX: Add a "minimum sent data per timeout counter to
			 * prevent slowloris attacks
			 */

			/* save errno before VSLb() etc. can clobber it */
			err = errno;

			if (err == EWOULDBLOCK) {
				VSLb(v1l->vsl, SLT_Debug,
				    "Hit idle send timeout, "
				    "wrote = %zd/%zd; retrying",
				    i, v1l->liov);
			}

			if (i > 0)
				v1l_prune(v1l, i);
		} while (i > 0 || err == EWOULDBLOCK);

		if (i <= 0) {
			/* write failed or deadline hit: latch the error */
			VSLb(v1l->vsl, SLT_Debug,
			    "Write error, retval = %zd, len = %zd, errno = %s",
			    i, v1l->liov, VAS_errtxt(err));
			assert(v1l->werr == SC_NULL);
			if (err == EPIPE)
				v1l->werr = SC_REM_CLOSE;
			else
				v1l->werr = SC_TX_ERROR;
			errno = err;
		}
	}
	/* reset pending state; in chunked mode re-reserve the header slot */
	v1l->liov = 0;
	v1l->cliov = 0;
	v1l->niov = 0;
	if (v1l->ciov < v1l->siov)
		v1l->ciov = v1l->niov++;
	CHECK_OBJ_NOTNULL(v1l->werr, STREAM_CLOSE_MAGIC);
	return (v1l->werr);
}
272
273
size_t
274 4559887
V1L_Write(struct v1l *v1l, const void *ptr, ssize_t alen)
275
{
276 4559887
        size_t len = 0;
277
278 4559887
        CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
279 4559887
        AN(v1l->wfd);
280 4559887
        if (alen == 0 || *v1l->wfd < 0)
281 882
                return (0);
282 4559887
        if (alen > 0)
283 2246400
                len = (size_t)alen;
284 2313487
        else if (alen == -1)
285 2313487
                len = strlen(ptr);
286
        else
287 0
                WRONG("alen");
288
289 4559887
        assert(v1l->niov < v1l->siov);
290 4559887
        v1l->iov[v1l->niov].iov_base = TRUST_ME(ptr);
291 4559887
        v1l->iov[v1l->niov].iov_len = len;
292 4559887
        v1l->liov += len;
293 4559887
        v1l->niov++;
294 4559887
        v1l->cliov += len;
295 4559887
        if (v1l->niov >= v1l->siov) {
296 4400
                (void)V1L_Flush(v1l);
297 4400
                VSC_C_main->http1_iovs_flush++;
298 4400
        }
299 4559887
        return (len);
300 4559887
}
301
302
/*
 * Enter chunked-encoding mode: reserve the current iov slot for the chunk
 * size header that V1L_Flush() will fill in, and shrink siov by one so a
 * slot remains available for the chunk tail (CRLF) appended at flush time.
 * Must not already be in chunked mode (ciov == siov).
 */
void
V1L_Chunked(struct v1l *v1l)
{

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);

	assert(v1l->ciov == v1l->siov);
	assert(v1l->siov >= 3);
	/*
	 * If there is no space for chunked header, a chunk of data and
	 * a chunk tail, we might as well flush right away.
	 */
	if (v1l->niov + 3 >= v1l->siov) {
		(void)V1L_Flush(v1l);
		VSC_C_main->http1_iovs_flush++;
	}
	v1l->siov--;			/* keep one slot for the chunk tail */
	v1l->ciov = v1l->niov++;	/* header slot, filled in by flush */
	v1l->cliov = 0;			/* start counting the new chunk */
	assert(v1l->ciov < v1l->siov);
	assert(v1l->niov < v1l->siov);
}
324
325
/*
326
 * XXX: It is not worth the complexity to attempt to get the
327
 * XXX: end of chunk into the V1L_Flush(), because most of the time
328
 * XXX: if not always, that is a no-op anyway, because the calling
329
 * XXX: code already called V1L_Flush() to release local storage.
330
 */
331
332
/*
 * Leave chunked-encoding mode: flush whatever is pending, undo the
 * bookkeeping done by V1L_Chunked() (restore siov, park ciov at siov),
 * and queue the terminating "0\r\n\r\n" last-chunk marker.
 */
void
V1L_EndChunk(struct v1l *v1l)
{

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);

	assert(v1l->ciov < v1l->siov);
	(void)V1L_Flush(v1l);
	v1l->siov++;			/* give back the tail slot */
	v1l->ciov = v1l->siov;		/* ciov == siov: no longer chunking */
	v1l->niov = 0;
	v1l->cliov = 0;
	(void)V1L_Write(v1l, "0\r\n\r\n", -1);
}
346
347
/*--------------------------------------------------------------------
348
 * VDP using V1L
349
 */
350
351
/* remember priv pointer for V1L_Close() to clear */
352
static int v_matchproto_(vdp_init_f)
353 160029
v1l_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
354
{
355
        struct v1l *v1l;
356
357 160029
        (void) ctx;
358 160029
        (void) vdc;
359 160029
        AN(priv);
360 160029
        CAST_OBJ_NOTNULL(v1l, *priv, V1L_MAGIC);
361
362 160029
        v1l->vdp_priv = priv;
363 160029
        return (0);
364
}
365
366
static int v_matchproto_(vdp_bytes_f)
367 195686
v1l_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
368
    const void *ptr, ssize_t len)
369
{
370 195686
        size_t wl = 0;
371
372 195686
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
373 195686
        AN(priv);
374
375 195686
        AZ(vdc->nxt);           /* always at the bottom of the pile */
376
377 195686
        if (len > 0)
378 141978
                wl = V1L_Write(*priv, ptr, len);
379 195686
        if (act > VDP_NULL && V1L_Flush(*priv) != SC_NULL)
380 478
                return (-1);
381 195208
        if ((size_t)len != wl)
382 0
                return (-1);
383 195208
        return (0);
384 195686
}
385
386
/* Bottom-of-pile VDP that delivers bytes straight to the v1l writer */
const struct vdp * const VDP_v1l = &(struct vdp){
	.name =		"V1B",
	.init =		v1l_init,
	.bytes =	v1l_bytes,
};