/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"
#include "storage/storage.h"
#include "vcl.h"
#include "vtim.h"
#include "vcc_interface.h"

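/*
 * The backend fetch side runs as a small state machine.  FETCH_STEPS lists
 * the steps; the FETCH_STEP() macro below expands each entry into a forward
 * declaration of its handler (vbf_stp_*) and a step descriptor (F_STP_*)
 * holding the step name and function pointer.
 */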
#define FETCH_STEPS \
	FETCH_STEP(mkbereq, MKBEREQ) \
	FETCH_STEP(retry, RETRY) \
	FETCH_STEP(startfetch, STARTFETCH) \
	FETCH_STEP(condfetch, CONDFETCH) \
	FETCH_STEP(fetch, FETCH) \
	FETCH_STEP(fetchbody, FETCHBODY) \
	FETCH_STEP(fetchend, FETCHEND) \
	FETCH_STEP(error, ERROR) \
	FETCH_STEP(fail, FAIL) \
	FETCH_STEP(done, DONE)

typedef const struct fetch_step *vbf_state_f(struct worker *, struct busyobj *);

struct fetch_step {
	const char	*name;
	vbf_state_f	*func;
};

#define FETCH_STEP(l, U) \
    static vbf_state_f vbf_stp_##l; \
    static const struct fetch_step F_STP_##U[1] = {{ .name = "Fetch Step " #l, .func = vbf_stp_##l, }};
FETCH_STEPS
#undef FETCH_STEP

/*--------------------------------------------------------------------
 * Allocate an object, with fall-back to Transient.
 * XXX: This somewhat overlaps the stuff in stevedore.c
 * XXX: Should this be merged over there ?
 */

static int
vbf_allocobj(struct busyobj *bo, unsigned l)
{
	struct objcore *oc;
	const struct stevedore *stv;
	vtim_dur lifetime;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	lifetime = oc->ttl + oc->grace + oc->keep;

	if (bo->uncacheable) {
		stv = stv_transient;
		bo->wrk->stats->beresp_uncacheable++;
	}
	else if (lifetime < cache_param->shortlived) {
		stv = stv_transient;
		bo->wrk->stats->beresp_shortlived++;
	}
	else
		stv = bo->storage;

	bo->storage = NULL;

	if (stv == NULL)
		return (0);

	if (STV_NewObject(bo->wrk, oc, stv, l))
		return (1);

	if (stv == stv_transient)
		return (0);

	/*
	 * Try to salvage the transaction by allocating a shortlived object
	 * on Transient storage.
	 */

	oc->ttl = vmin_t(float, oc->ttl, cache_param->shortlived);
	oc->grace = 0.0;
	oc->keep = 0.0;
	return (STV_NewObject(bo->wrk, oc, stv_transient, l));
}

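/* Close the fetch processor stack, account the body bytes received and,
 * if a backend/director fetch is still in progress, finish it. */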
static void
vbf_cleanup(struct busyobj *bo)
{
	struct vfp_ctx *vfc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);

	bo->acct.beresp_bodybytes += VFP_Close(vfc);
	bo->vfp_filter_list = NULL;

	if (bo->director_state != DIR_S_NULL)
		VDI_Finish(bo);
}

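/* Roll the busyobj back for a fresh backend request: discard any ongoing
 * fetch, restart the VCL task, restore bereq from bereq0 and roll the
 * workspace back to the snapshot taken in vbf_stp_mkbereq(). */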
void
Bereq_Rollback(VRT_CTX)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	if (bo->htc != NULL &&
	    bo->htc->body_status != BS_NONE &&
	    bo->htc->body_status != BS_TAKEN)
		bo->htc->doclose = SC_RESP_CLOSE;

	vbf_cleanup(bo);
	VCL_TaskLeave(ctx, bo->privs);
	VCL_TaskEnter(bo->privs);
	HTTP_Clone(bo->bereq, bo->bereq0);
	bo->vfp_filter_list = NULL;
	bo->err_reason = NULL;
	AN(bo->ws_bo);
	WS_Rollback(bo->ws, bo->ws_bo);
}

/*--------------------------------------------------------------------
 * Turn the beresp into an obj
 */

static int
vbf_beresp2obj(struct busyobj *bo)
{
	unsigned l, l2;
	const char *b;
	uint8_t *bp;
	struct vsb *vary = NULL;
	int varyl = 0;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	l = 0;

	/* Create Vary instructions */
	if (!(oc->flags & OC_F_PRIVATE)) {
		varyl = VRY_Create(bo, &vary);
		if (varyl > 0) {
			AN(vary);
			assert(varyl == VSB_len(vary));
			l += PRNDUP((intptr_t)varyl);
		} else if (varyl < 0) {
			/*
			 * Vary parse error
			 * Complain about it, and make this a pass.
			 */
			VSLb(bo->vsl, SLT_Error,
			    "Illegal 'Vary' header from backend, "
			    "making this a pass.");
			bo->uncacheable = 1;
			AZ(vary);
		} else
			/* No vary */
			AZ(vary);
	}

	l2 = http_EstimateWS(bo->beresp,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);
	l += l2;

	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	if (!vbf_allocobj(bo, l)) {
		if (vary != NULL)
			VSB_destroy(&vary);
		AZ(vary);
		return (VFP_Error(bo->vfc, "Could not get storage"));
	}

	if (vary != NULL) {
		AN(ObjSetAttr(bo->wrk, oc, OA_VARY, varyl, VSB_data(vary)));
		VSB_destroy(&vary);
	}

	AZ(ObjSetXID(bo->wrk, oc, bo->vsl->wid));

	/* for HTTP_Encode() VSLH call */
	bo->beresp->logtag = SLT_ObjMethod;

	/* Filter into object */
	bp = ObjSetAttr(bo->wrk, oc, OA_HEADERS, l2, NULL);
	AN(bp);
	HTTP_Encode(bo->beresp, bp, l2,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);

	if (http_GetHdr(bo->beresp, H_Last_Modified, &b))
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED, VTIM_parse(b)));
	else
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED,
		    floor(oc->t_origin)));

	return (0);
}

/*--------------------------------------------------------------------
 * Copy req->bereq and release req if no body
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_mkbereq(struct worker *wrk, struct busyobj *bo)
{
	const char *q;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(oc->boc->state == BOS_INVALID);
	AZ(bo->storage);

	HTTP_Setup(bo->bereq0, bo->ws, bo->vsl, SLT_BereqMethod);
	http_FilterReq(bo->bereq0, bo->req->http,
	    bo->uncacheable ? HTTPH_R_PASS : HTTPH_R_FETCH);

	if (bo->uncacheable)
		AZ(bo->stale_oc);
	else {
		http_ForceField(bo->bereq0, HTTP_HDR_METHOD, "GET");
		if (cache_param->http_gzip_support)
			http_ForceHeader(bo->bereq0, H_Accept_Encoding, "gzip");
	}
	http_ForceField(bo->bereq0, HTTP_HDR_PROTO, "HTTP/1.1");

	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND) &&
	    (bo->stale_oc->boc != NULL || ObjGetLen(wrk, bo->stale_oc) != 0)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		q = RFC2616_Strong_LM(NULL, wrk, bo->stale_oc);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-Modified-Since: %s", q);
		q = HTTP_GetHdrPack(bo->wrk, bo->stale_oc, H_ETag);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-None-Match: %s", q);
	}

	http_CopyHome(bo->bereq0);
	HTTP_Setup(bo->bereq, bo->ws, bo->vsl, SLT_BereqMethod);
	bo->ws_bo = WS_Snapshot(bo->ws);
	HTTP_Clone(bo->bereq, bo->bereq0);

	if (bo->req->req_body_status->avail == 0) {
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	} else if (bo->req->req_body_status == BS_CACHED) {
		AN(bo->req->body_oc);
		bo->bereq_body = bo->req->body_oc;
		HSH_Ref(bo->bereq_body);
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	}
	return (F_STP_STARTFETCH);
}

/*--------------------------------------------------------------------
 * Start a new VSL transaction and try again
 * Prepare the busyobj and fetch processors
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	if (bo->no_retry != NULL) {
		VSLb(bo->vsl, SLT_Error,
		    "Retry not possible, %s", bo->no_retry);
		return (F_STP_FAIL);
	}

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	/* VDI_Finish (via vbf_cleanup) must have been called before */
	assert(bo->director_state == DIR_S_NULL);

	/* reset other bo attributes - See VBO_GetBusyObj */
	bo->storage = NULL;
	bo->do_esi = 0;
	bo->do_stream = 1;
	bo->was_304 = 0;
	bo->err_code = 0;
	bo->err_reason = NULL;
	bo->connect_timeout = NAN;
	bo->first_byte_timeout = NAN;
	bo->between_bytes_timeout = NAN;
	if (bo->htc != NULL)
		bo->htc->doclose = SC_NULL;

	// XXX: BereqEnd + BereqAcct ?
	VSL_ChgId(bo->vsl, "bereq", "retry", VXID_Get(wrk, VSL_BACKENDMARKER));
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);
	http_VSL_log(bo->bereq);

	return (F_STP_STARTFETCH);
}

/*--------------------------------------------------------------------
 * 304 setup logic
 */

static int
vbf_304_logic(struct busyobj *bo)
{
	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGCE)) {
			/*
			 * If a VFP changed C-E in the stored
			 * object, then don't overwrite C-E from
			 * the IMS fetch, and we must weaken any
			 * new ETag we get.
			 */
			RFC2616_Weaken_Etag(bo->beresp);
		}
		http_Unset(bo->beresp, H_Content_Encoding);
		http_Unset(bo->beresp, H_Content_Length);
		HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
		assert(http_IsStatus(bo->beresp, 200));
		bo->was_304 = 1;
	} else if (!bo->uncacheable) {
		/*
		 * Backend sent unallowed 304
		 */
		VSLb(bo->vsl, SLT_Error,
		    "304 response but not conditional fetch");
		bo->htc->doclose = SC_RX_BAD;
		vbf_cleanup(bo);
		return (-1);
	}
	return (1);
}

/*--------------------------------------------------------------------
 * Setup bereq from bereq0, run vcl_backend_fetch
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	vtim_real now;
	unsigned handling;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->storage);
	bo->storage = bo->uncacheable ? stv_transient : STV_next();

	if (bo->retries > 0)
		http_Unset(bo->bereq, "\012X-Varnish:");

	http_PrintfHeader(bo->bereq, "X-Varnish: %ju", VXID(bo->vsl->wid));

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL)
		return (F_STP_FAIL);

	assert (wrk->vpi->handling == VCL_RET_FETCH ||
	    wrk->vpi->handling == VCL_RET_ERROR);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(oc->boc->state <= BOS_REQ_DONE);

	AZ(bo->htc);

	VFP_Setup(bo->vfc, wrk);
	bo->vfc->oc = oc;
	bo->vfc->resp = bo->beresp;
	bo->vfc->req = bo->bereq;

	if (wrk->vpi->handling == VCL_RET_ERROR)
		return (F_STP_ERROR);

	VSLb_ts_busyobj(bo, "Fetch", W_TIM_real(wrk));
	i = VDI_GetHdr(bo);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	bo->t_resp = now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (bo->htc != NULL && bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/* What does RFC2616 think about TTL ? */
	RFC2616_Ttl(bo, now,
	    &oc->t_origin,
	    &oc->ttl,
	    &oc->grace,
	    &oc->keep);

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304) && vbf_304_logic(bo) < 0)
		return (F_STP_ERROR);

	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->bereq, H_Connection, "close", NULL))
		bo->htc->doclose = SC_REQ_CLOSE;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->beresp, H_Connection, "close", NULL))
		bo->htc->doclose = SC_RESP_CLOSE;

	if (VRG_CheckBo(bo) < 0) {
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo);
		return (F_STP_ERROR);
	}

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL ||
	    wrk->vpi->handling == VCL_RET_ERROR) {
		/* do not count deliberately ending the backend connection as
		 * fetch failure
		 */
		handling = wrk->vpi->handling;
		if (bo->htc)
			bo->htc->doclose = SC_RESP_CLOSE;
		vbf_cleanup(bo);
		wrk->vpi->handling = handling;

		if (wrk->vpi->handling == VCL_RET_ERROR)
			return (F_STP_ERROR);
		else
			return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		if (bo->htc && bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		vbf_cleanup(bo);

		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	VSLb_ts_busyobj(bo, "Process", W_TIM_real(wrk));
	assert(oc->boc->state <= BOS_REQ_DONE);
	if (oc->boc->state != BOS_REQ_DONE) {
		bo->req = NULL;
		ObjSetState(wrk, oc, BOS_REQ_DONE);
	}

	if (bo->do_esi)
		bo->do_stream = 0;
	if (wrk->vpi->handling == VCL_RET_PASS) {
		oc->flags |= OC_F_HFP;
		bo->uncacheable = 1;
		wrk->vpi->handling = VCL_RET_DELIVER;
	}
	if (!bo->uncacheable || !bo->do_stream)
		oc->boc->transit_buffer = 0;
	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}

/*--------------------------------------------------------------------
 */

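/* Pull the response body through the VFP stack into object storage, one
 * storage segment at a time, until the filters report VFP_END. */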
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	ssize_t est;
	struct vfp_ctx *vfc;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(vfc->vfp_nxt);

	est = bo->htc->content_length;
	if (est < 0)
		est = 0;

	do {
		if (oc->flags & OC_F_CANCEL) {
			/*
			 * A pass object and delivery was terminated
			 * We don't fail the fetch, in order for HitMiss
			 * objects to be created.
			 */
			AN(oc->flags & OC_F_HFM);
			VSLb(wrk->vsl, SLT_Debug,
			    "Fetch: Pass delivery abandoned");
			bo->htc->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		l = est;
		assert(l >= 0);
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) {
			bo->htc->doclose = SC_RX_BODY;
			break;
		}

		AZ(vfc->failed);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l >= 0 && vfps != VFP_ERROR) {
			VFP_Extend(vfc, l, vfps);
			if (est >= l)
				est -= l;
			else
				est = 0;
		}
	} while (vfps == VFP_OK);

	if (vfc->failed) {
		(void)VFP_Error(vfc, "Fetch pipeline failed to process");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		if (!bo->do_stream) {
			assert(oc->boc->state < BOS_STREAM);
			// XXX: doclose = ?
			return (F_STP_ERROR);
		} else {
			wrk->stats->fetch_failed++;
			return (F_STP_FAIL);
		}
	}

	return (F_STP_FETCHEND);
}

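/* Set up the body fetch: pick and stack the VFP filter list, open the
 * fetch pipeline, turn beresp into the new object and, when streaming is
 * allowed, unbusy the object before its body is fetched. */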
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	struct vrt_ctx ctx[1];
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	if (bo->htc == NULL) {
		(void)VFP_Error(bo->vfc, "No backend connection (rollback?)");
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	/* No body -> done */
	if (bo->htc->body_status == BS_NONE || bo->htc->content_length == 0) {
		http_Unset(bo->beresp, H_Content_Encoding);
		bo->do_gzip = bo->do_gunzip = 0;
		bo->do_stream = 0;
		bo->vfp_filter_list = "";
	} else if (bo->vfp_filter_list == NULL) {
		bo->vfp_filter_list = VBF_Get_Filter_List(bo);
	}

	if (bo->vfp_filter_list == NULL ||
	    VCL_StackVFP(bo->vfc, bo->vcl, bo->vfp_filter_list)) {
		(bo)->htc->doclose = SC_OVERLOAD;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (oc->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	oc->boc->fetched_so_far = 0;

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);

	if (VFP_Open(ctx, bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch pipeline failed to open");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

#define OBJ_FLAG(U, l, v) \
	if (bo->vfc->obj_flags & OF_##U) \
		ObjSetFlag(bo->wrk, oc, OF_##U, 1);
#include "tbl/obj_attr.h"

	if (!(oc->flags & OC_F_HFM) &&
	    http_IsStatus(bo->beresp, 200) && (
	    RFC2616_Strong_LM(bo->beresp, NULL, NULL) != NULL ||
	    http_GetHdr(bo->beresp, H_ETag, NULL)))
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 1);

	assert(oc->boc->refcount >= 1);

	assert(oc->boc->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status->nbr, bo->htc->body_status->name,
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		return (F_STP_FETCHBODY);
	}
	AZ(bo->vfc->failed);
	return (F_STP_FETCHEND);
}

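/* Fetch completed: record the object length, unbusy the object if it was
 * not streamed, mark it BOS_FINISHED and let it supersede a stale one. */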
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
{

	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->vfc->failed);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	vbf_cleanup(bo);

	AZ(ObjSetU64(wrk, oc, OA_LEN, oc->boc->fetched_so_far));

	if (bo->do_stream)
		assert(oc->boc->state == BOS_STREAM);
	else {
		assert(oc->boc->state == BOS_REQ_DONE);
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
	}

	ObjSetState(wrk, oc, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->stale_oc != NULL) {
		VSL(SLT_ExpKill, NO_VXID, "VBF_Superseded x=%ju n=%ju",
		    VXID(ObjGetXID(wrk, bo->stale_oc)),
		    VXID(ObjGetXID(wrk, bo->fetch_objcore)));
		HSH_Replace(bo->stale_oc, bo->fetch_objcore);
	}
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

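/*
 * ObjIterate() callback used by the conditional fetch: copies the body of
 * the stale "template" object, segment by segment, into storage allocated
 * for the new object.
 */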
struct vbf_objiter_priv {
	unsigned		magic;
#define VBF_OBITER_PRIV_MAGIC	0x3c272a17
	struct busyobj		*bo;
	// not yet allocated
	ssize_t			l;
	// current allocation
	uint8_t			*p;
	ssize_t			pl;
};

static int v_matchproto_(objiterate_f)
vbf_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
	struct vbf_objiter_priv *vop;
	ssize_t l;
	const uint8_t *ps = ptr;

	CAST_OBJ_NOTNULL(vop, priv, VBF_OBITER_PRIV_MAGIC);
	CHECK_OBJ_NOTNULL(vop->bo, BUSYOBJ_MAGIC);

	flush &= OBJ_ITER_END;

	while (len > 0) {
		if (vop->pl == 0) {
			vop->p = NULL;
			AN(vop->l);
			vop->pl = vop->l;
			if (VFP_GetStorage(vop->bo->vfc, &vop->pl, &vop->p)
			    != VFP_OK)
				return (1);
			if (vop->pl < vop->l)
				vop->l -= vop->pl;
			else
				vop->l = 0;
		}
		AN(vop->pl);
		AN(vop->p);

		l = vmin(vop->pl, len);
		memcpy(vop->p, ps, l);
		VFP_Extend(vop->bo->vfc, l,
		    flush && l == len ? VFP_END : VFP_OK);
		ps += l;
		vop->p += l;
		len -= l;
		vop->pl -= l;
	}
	if (flush)
		AZ(vop->l);
	return (0);
}

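/* The backend replied 304: build the new object by copying attributes and
 * body from the stale object instead of fetching a new body. */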
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	struct boc *stale_boc;
	enum boc_state_e stale_state;
	struct objcore *oc, *stale_oc;
	struct vbf_objiter_priv vop[1];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stale_oc = bo->stale_oc;
	CHECK_OBJ_NOTNULL(stale_oc, OBJCORE_MAGIC);

	stale_boc = HSH_RefBoc(stale_oc);
	CHECK_OBJ_ORNULL(stale_boc, BOC_MAGIC);
	if (stale_boc) {
		/* Wait for the stale object to become fully fetched, so
		 * that we can catch fetch errors, before we unbusy the
		 * new object. This serves two purposes. First it helps
		 * with request coalescing, and stops long chains of
		 * IMS-updated short-TTL objects all streaming from a
		 * single slow body fetch. Second it makes sure that all
		 * the object attributes are complete when we copy them
		 * (this would be an issue for ie OA_GZIPBITS). */
		VSLb(bo->vsl, SLT_Notice,
		    "vsl: Conditional fetch wait for streaming object");
		ObjWaitState(stale_oc, BOS_FINISHED);
		stale_state = stale_boc->state;
		HSH_DerefBoc(bo->wrk, stale_oc);
		stale_boc = NULL;
		if (stale_state != BOS_FINISHED) {
			assert(stale_state == BOS_FAILED);
			AN(stale_oc->flags & OC_F_FAILED);
		}
	}

	AZ(stale_boc);
	if (stale_oc->flags & OC_F_FAILED) {
		(void)VFP_Error(bo->vfc, "Template object failed");
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	if (vbf_beresp2obj(bo)) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	if (ObjHasAttr(bo->wrk, stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_FLAGS));
	if (oc->flags & OC_F_HFM)
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 0);
	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	INIT_OBJ(vop, VBF_OBITER_PRIV_MAGIC);
	vop->bo = bo;
	vop->l = ObjGetLen(bo->wrk, stale_oc);
	if (ObjIterate(wrk, stale_oc, vop, vbf_objiterate, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}
	return (F_STP_FETCHEND);
}

/*--------------------------------------------------------------------
 * Create synth object
 *
 * replaces a stale object unless
 * - abandoning the bereq or
 * - leaving vcl_backend_error with return (deliver) and beresp.ttl == 0s or
 * - there is a waitinglist on this object because in this case the default ttl
 *   would be 1s, so we might be looking at the same case as the previous
 *
 * We do want the stale replacement to avoid an object pileup with short ttl and
 * long grace/keep, yet there could exist cases where a cache object is
 * deliberately created to momentarily override a stale object.
 *
 * If this case exists, we should add a vcl veto (e.g. beresp.replace_stale with
 * default true)
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	vtim_real now;
	uint8_t *ptr;
	struct vsb *synth_body;
	struct objcore *stale, *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	if (wrk->vpi->handling != VCL_RET_ERROR)
		wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	if (oc->stobj->stevedore != NULL) {
		oc->boc->fetched_so_far = 0;
		ObjFreeObj(bo->wrk, oc);
	}

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	if (bo->err_code > 0)
		http_PutResponse(bo->beresp, "HTTP/1.1", bo->err_code,
		    bo->err_reason);
	else
		http_PutResponse(bo->beresp, "HTTP/1.1", 503,
		    "Backend fetch failed");

	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	stale = bo->stale_oc;
	oc->t_origin = now;
	if (!VTAILQ_EMPTY(&oc->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		oc->ttl = 1;
		oc->grace = 5;
		oc->keep = 5;
		stale = NULL;
	} else {
		oc->ttl = 0;
		oc->grace = 0;
		oc->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->vpi->handling == VCL_RET_ABANDON || wrk->vpi->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == oc);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	oc->boc->transit_buffer = 0;

	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		l = vmin(l, ll);
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l, l == ll ? VFP_END : VFP_OK);
		ll -= l;
		o += l;
	}
	AZ(ObjSetU64(wrk, oc, OA_LEN, o));
	VSB_destroy(&synth_body);
	ObjSetState(wrk, oc, BOS_PREP_STREAM);
	HSH_Unbusy(wrk, oc);
	if (stale != NULL && oc->ttl > 0)
		HSH_Kill(stale);
	ObjSetState(wrk, oc, BOS_FINISHED);
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

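/* Terminal failure: mark the objcore failed, kill it if it is no longer
 * busy, and set the fetch state to BOS_FAILED. */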
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fail(struct worker *wrk, struct busyobj *bo)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(oc->boc->state < BOS_FINISHED);
	HSH_Fail(oc);
	if (!(oc->flags & OC_F_BUSY))
		HSH_Kill(oc);
	ObjSetState(wrk, oc, BOS_FAILED);
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_done(struct worker *wrk, struct busyobj *bo)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	WRONG("Just plain wrong");
	NEEDLESS(return (F_STP_DONE));
}

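/* Fetch task entry point: drive the state machine from MKBEREQ until
 * F_STP_DONE, then tear down and release everything the busyobj holds. */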
static void v_matchproto_(task_func_t)
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct vrt_ctx ctx[1];
	struct busyobj *bo;
	struct objcore *oc;
	const struct fetch_step *stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

#if 0
	if (bo->stale_oc != NULL) {
		CHECK_OBJ_NOTNULL(bo->stale_oc, OBJCORE_MAGIC);
		/* We don't want the oc/stevedore ops in fetching thread */
		if (!ObjCheckFlag(wrk, bo->stale_oc, OF_IMSCAND))
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
	}
#endif

	VCL_TaskEnter(bo->privs);
	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(oc->boc->refcount >= 1);
		if (oc->boc->state < BOS_REQ_DONE)
			AN(bo->req);
		else
			AZ(bo->req);
		AN(stp);
		AN(stp->name);
		AN(stp->func);
		stp = stp->func(wrk, bo);
	}

	assert(bo->director_state == DIR_S_NULL);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);
	VCL_TaskLeave(ctx, bo->privs);
	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);
	// cannot make assumptions about the number of references here #3434
	if (bo->bereq_body != NULL)
		(void) HSH_DerefObjCore(bo->wrk, &bo->bereq_body, 0);

	if (oc->boc->state == BOS_FINISHED) {
		AZ(oc->flags & OC_F_FAILED);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, oc));
	}
	// AZ(oc->boc);	// XXX

	if (bo->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);

	wrk->vsl = NULL;
	HSH_DerefBoc(wrk, oc);
	SES_Rel(bo->sp);
	VBO_ReleaseBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}

/*--------------------------------------------------------------------
 */

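/* Client side entry point: set up a busyobj for the objcore, hand it to a
 * fetch task, then wait for the fetch to reach BOS_REQ_DONE (background
 * fetch) or BOS_STREAM before returning. */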
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	enum task_prio prio;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AN(bo->vcl);

	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		prio = TASK_QUEUE_BO;
		how = "pass";
		bo->uncacheable = 1;
		break;
	case VBF_NORMAL:
		prio = TASK_QUEUE_BO;
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		prio = TASK_QUEUE_BG;
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

#define REQ_BEREQ_FLAG(l, r, w, d) bo->l = req->l;
#include "tbl/req_bereq_flags.h"

	VSLb(bo->vsl, SLT_Begin, "bereq %ju %s", VXID(req->vsl->wid), how);
	VSLbs(bo->vsl, SLT_VCL_use, TOSTRAND(VCL_Name(bo->vcl)));
	VSLb(req->vsl, SLT_Link, "bereq %ju %s", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->sp = req->sp;
	SES_Ref(bo->sp);

	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task->priv = bo;
	bo->fetch_task->func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, bo->fetch_task, prio)) {
		wrk->stats->bgfetch_no_thread++;
		VSLb(bo->vsl, SLT_FetchError,
		    "No thread available for bgfetch");
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		THR_SetBusyobj(NULL);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		THR_SetBusyobj(NULL);
		bo = NULL;	/* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
}
|
} |