varnish-cache/bin/varnishd/cache/cache_req_body.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"
#include "cache_transport.h"

#include "vtim.h"
#include "storage/storage.h"

/*----------------------------------------------------------------------
 * Pull the req.body in via/into an objcore
 *
 * This can be called only once per request
 *
 */

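/*
 * Two calling modes: VRB_Iterate() passes a func and maxsize == -1 to
 * stream the body to func without keeping it; VRB_Cache() passes
 * func == NULL and maxsize >= 0 to store the body in req->body_oc,
 * failing if it grows beyond maxsize.
 */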
static ssize_t
vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv)
{
        ssize_t l, r = 0, yet;
        struct vrt_ctx ctx[1];
        struct vfp_ctx *vfc;
        uint8_t *ptr;
        enum vfp_status vfps = VFP_ERROR;
        const struct stevedore *stv;
        ssize_t req_bodybytes = 0;
        unsigned flush = OBJ_ITER_FLUSH;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC);
        CHECK_OBJ_NOTNULL(req->vfc, VFP_CTX_MAGIC);
        vfc = req->vfc;

        req->body_oc = HSH_Private(req->wrk);
        AN(req->body_oc);

        if (req->storage != NULL)
                stv = req->storage;
        else
                stv = stv_transient;

        req->storage = NULL;

        if (STV_NewObject(req->wrk, req->body_oc, stv, 0) == 0) {
                req->req_body_status = BS_ERROR;
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                (void)VFP_Error(vfc, "Object allocation failed:"
                    " Ran out of space in %s", stv->vclname);
                return (-1);
        }

        vfc->oc = req->body_oc;

        INIT_OBJ(ctx, VRT_CTX_MAGIC);
        VCL_Req2Ctx(ctx, req);

        if (VFP_Open(ctx, vfc) < 0) {
                req->req_body_status = BS_ERROR;
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                return (-1);
        }

        AN(req->htc);
        yet = req->htc->content_length;
        if (yet != 0 && req->want100cont) {
                req->want100cont = 0;
                (void)req->transport->minimal_response(req, 100);
        }
        yet = vmax_t(ssize_t, yet, 0);
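        /*
         * Main read loop: grab a storage segment, pull bytes through the
         * request body filter (VFP) chain, and either hand them to func
         * (iteration) or append them to the body objcore (caching).
         * "yet" is the remaining advertised Content-Length, used only as
         * an allocation hint; chunked bodies enter the loop with yet == 0.
         */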
        do {
                AZ(vfc->failed);
                if (maxsize >= 0 && req_bodybytes > maxsize) {
                        (void)VFP_Error(vfc, "Request body too big to cache");
                        break;
                }
                /* NB: only attempt a full allocation when caching. */
                l = maxsize > 0 ? yet : 0;
                if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK)
                        break;
                AZ(vfc->failed);
                AN(ptr);
                AN(l);
                vfps = VFP_Suck(vfc, ptr, &l);
                if (l > 0 && vfps != VFP_ERROR) {
                        req_bodybytes += l;
                        if (yet >= l)
                                yet -= l;
                        else if (yet > 0)
                                yet = 0;
                        if (func != NULL) {
                                if (vfps == VFP_END)
                                        flush |= OBJ_ITER_END;
                                r = func(priv, flush, ptr, l);
                                if (r)
                                        break;
                        } else {
                                ObjExtend(req->wrk, req->body_oc, l,
                                    vfps == VFP_END ? 1 : 0);
                        }
                }

        } while (vfps == VFP_OK);
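        /*
         * Wind down: close the VFP chain and account the body bytes.
         * When iterating (func != NULL) the temporary body objcore is
         * released again; when caching it is kept, its length is recorded
         * and Content-Length/Transfer-Encoding are rewritten to match the
         * number of bytes actually read, if that differs from what the
         * client advertised.
         */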
        req->acct.req_bodybytes += VFP_Close(vfc);
        VSLb_ts_req(req, "ReqBody", VTIM_real());
        if (func != NULL) {
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                if (vfps == VFP_END && r == 0 && (flush & OBJ_ITER_END) == 0)
                        r = func(priv, flush | OBJ_ITER_END, NULL, 0);
                if (vfps != VFP_END) {
                        req->req_body_status = BS_ERROR;
                        if (r == 0)
                                r = -1;
                }
                return (r);
        }

        AZ(ObjSetU64(req->wrk, req->body_oc, OA_LEN, req_bodybytes));
        HSH_DerefBoc(req->wrk, req->body_oc);

        if (vfps != VFP_END) {
                req->req_body_status = BS_ERROR;
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                return (-1);
        }

        assert(req_bodybytes >= 0);
        if (req_bodybytes != req->htc->content_length) {
                /* We must also update the "pristine" req.* copy */
                http_Unset(req->http0, H_Content_Length);
                http_Unset(req->http0, H_Transfer_Encoding);
                http_PrintfHeader(req->http0, "Content-Length: %ju",
                    (uintmax_t)req_bodybytes);

                http_Unset(req->http, H_Content_Length);
                http_Unset(req->http, H_Transfer_Encoding);
                http_PrintfHeader(req->http, "Content-Length: %ju",
                    (uintmax_t)req_bodybytes);
        }

        req->req_body_status = BS_CACHED;
        return (req_bodybytes);
}

/*----------------------------------------------------------------------
 * Iterate over the req.body.
 *
 * This can be done exactly once if uncached, and multiple times if the
 * req.body is cached.
 *
 * Returns 0 on success, or non-zero (-1, or the iterator's non-zero
 * return value) on error.
 */

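/*
 * The objiterate_f callback receives the body in segments.  A minimal
 * byte-counting iterator (an illustrative sketch only, not used in this
 * file) could look like:
 *
 *    static int v_matchproto_(objiterate_f)
 *    count_body_bytes(void *priv, unsigned flush, const void *ptr,
 *        ssize_t len)
 *    {
 *        ssize_t *cnt = priv;
 *
 *        (void)flush;
 *        (void)ptr;
 *        *cnt += len;
 *        return (0);    // non-zero would abort the iteration
 *    }
 *
 *    // ... with a struct worker *wrk and a struct req *req at hand:
 *    ssize_t cnt = 0;
 *    if (VRB_Iterate(wrk, req->vsl, req, count_body_bytes, &cnt) != 0)
 *        cnt = -1;
 */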
ssize_t
VRB_Iterate(struct worker *wrk, struct vsl_log *vsl,
    struct req *req, objiterate_f *func, void *priv)
{
        int i;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        AN(func);

        if (req->req_body_status == BS_CACHED) {
                AN(req->body_oc);
                if (ObjIterate(wrk, req->body_oc, priv, func, 0))
                        return (-1);
                return (0);
        }
        if (req->req_body_status == BS_NONE)
                return (0);
        if (req->req_body_status == BS_TAKEN) {
                VSLb(vsl, SLT_VCL_Error,
                    "Uncached req.body can only be consumed once.");
                return (-1);
        }
        if (req->req_body_status == BS_ERROR) {
                VSLb(vsl, SLT_FetchError,
                    "Had failed reading req.body before.");
                return (-1);
        }
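        /*
         * The transition to BS_TAKEN happens under the session mutex, so
         * that only one consumer can claim the uncached body.
         */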
        Lck_Lock(&req->sp->mtx);
        if (req->req_body_status->avail > 0) {
                req->req_body_status = BS_TAKEN;
                i = 0;
        } else
                i = -1;
        Lck_Unlock(&req->sp->mtx);
        if (i) {
                VSLb(vsl, SLT_VCL_Error,
                    "Multiple attempts to access non-cached req.body");
                return (i);
        }
        return (vrb_pull(req, -1, func, priv));
}

/*----------------------------------------------------------------------
 * VRB_Ignore() is a dedicated function, because we might be able to
 * dissuade or terminate the transmission of the request body in some
 * protocols.
 *
 * For HTTP1 we do nothing if we are going to close the connection anyway;
 * otherwise we just iterate the body into oblivion.
 */

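/* An objiterate_f which simply throws away the bytes handed to it. */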
static int v_matchproto_(objiterate_f)
httpq_req_body_discard(void *priv, unsigned flush, const void *ptr, ssize_t len)
{

        (void)priv;
        (void)flush;
        (void)ptr;
        (void)len;
        return (0);
}

int
VRB_Ignore(struct req *req)
{

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        if (req->doclose != SC_NULL)
                return (0);
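        /*
         * Read and discard any remaining body; if that fails, make sure
         * the connection gets closed (SC_RX_BODY) instead of reused.
         */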
        if (req->req_body_status->avail > 0)
                (void)VRB_Iterate(req->wrk, req->vsl, req,
                    httpq_req_body_discard, NULL);
        if (req->req_body_status == BS_ERROR)
                req->doclose = SC_RX_BODY;
        return (0);
}

/*----------------------------------------------------------------------
 */

void
VRB_Free(struct req *req)
{
        int r;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        if (req->body_oc == NULL)
                return;

        r = HSH_DerefObjCore(req->wrk, &req->body_oc, 0);

        // each busyobj may have gained a reference
        assert (r >= 0);
        assert ((unsigned)r <= req->restarts + 1);
}

/*----------------------------------------------------------------------
 * Cache the req.body if it is smaller than the given size
 *
 * This function must be called before any backend fetches are kicked
 * off to prevent parallelism.
 */

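/*
 * Returns the number of cached body bytes on success, the (non-positive)
 * req_body_status->avail value when there is no body to read, and -1 on
 * error or when the body does not fit within maxsize.
 */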
ssize_t
VRB_Cache(struct req *req, ssize_t maxsize)
{
        uint64_t u;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        assert (req->req_step == R_STP_RECV);
        assert(maxsize >= 0);

        /*
         * We only allow caching to happen the first time through vcl_recv{}
         * where we know we will have no competition or conflicts for the
         * updates to req.http.* etc.
         */
        if (req->restarts > 0 && req->req_body_status != BS_CACHED) {
                VSLb(req->vsl, SLT_VCL_Error,
                    "req.body must be cached before restarts");
                return (-1);
        }

        if (req->req_body_status == BS_CACHED) {
                AZ(ObjGetU64(req->wrk, req->body_oc, OA_LEN, &u));
                return (u);
        }

        if (req->req_body_status->avail <= 0)
                return (req->req_body_status->avail);

        if (req->htc->content_length > maxsize) {
                req->req_body_status = BS_ERROR;
                (void)VFP_Error(req->vfc, "Request body too big to cache");
                return (-1);
        }

        return (vrb_pull(req, maxsize, NULL, NULL));
}