varnish-cache/bin/varnishd/cache/cache_fetch.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"
#include "storage/storage.h"
#include "vcl.h"
#include "vtim.h"
#include "vcc_interface.h"

#define FETCH_STEPS \
	FETCH_STEP(mkbereq, MKBEREQ) \
	FETCH_STEP(retry, RETRY) \
	FETCH_STEP(startfetch, STARTFETCH) \
	FETCH_STEP(condfetch, CONDFETCH) \
	FETCH_STEP(fetch, FETCH) \
	FETCH_STEP(fetchbody, FETCHBODY) \
	FETCH_STEP(fetchend, FETCHEND) \
	FETCH_STEP(error, ERROR) \
	FETCH_STEP(fail, FAIL) \
	FETCH_STEP(done, DONE)

typedef const struct fetch_step *vbf_state_f(struct worker *, struct busyobj *);

struct fetch_step {
	const char *name;
	vbf_state_f *func;
};

#define FETCH_STEP(l, U) \
	static vbf_state_f vbf_stp_##l; \
	static const struct fetch_step F_STP_##U[1] = {{ .name = "Fetch Step " #l, .func = vbf_stp_##l, }};
FETCH_STEPS
#undef FETCH_STEP

static hdr_t const H_X_Varnish = HDR("X-Varnish");

/*--------------------------------------------------------------------
 * Allocate an object, with fall-back to Transient.
 * XXX: This somewhat overlaps the stuff in stevedore.c
 * XXX: Should this be merged over there ?
 */

static int
vbf_allocobj(struct busyobj *bo, unsigned l)
{
	struct objcore *oc;
	const struct stevedore *stv;
	vtim_dur lifetime;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	lifetime = oc->ttl + oc->grace + oc->keep;

	if (bo->uncacheable) {
		stv = stv_transient;
		bo->wrk->stats->beresp_uncacheable++;
	}
	else if (lifetime < cache_param->shortlived) {
		stv = stv_transient;
		bo->wrk->stats->beresp_shortlived++;
	}
	else
		stv = bo->storage;

	bo->storage = NULL;

	if (stv == NULL)
		return (0);

	if (STV_NewObject(bo->wrk, oc, stv, l))
		return (1);

	if (stv == stv_transient)
		return (0);

	/*
	 * Try to salvage the transaction by allocating a shortlived object
	 * on Transient storage.
	 */

	oc->ttl = vmin_t(float, oc->ttl, cache_param->shortlived);
	oc->grace = 0.0;
	oc->keep = 0.0;
	return (STV_NewObject(bo->wrk, oc, stv_transient, l));
}

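/* Close the fetch processor (VFP) stack, account the body bytes read so
 * far, and release the director state so the backend connection can be
 * recycled or closed. */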
static void
vbf_cleanup(struct busyobj *bo)
{
	struct vfp_ctx *vfc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);

	bo->acct.beresp_bodybytes += VFP_Close(vfc);
	bo->vfp_filter_list = NULL;

	if (bo->director_state != DIR_S_NULL)
		VDI_Finish(bo);
}

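/* Undo changes to the bereq: close any backend fetch already in
 * progress, restart the VCL task, restore bereq from bereq0 and roll
 * the busyobj workspace back to the snapshot taken in vbf_stp_mkbereq. */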
void
Bereq_Rollback(VRT_CTX)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	if (bo->htc != NULL) {
		assert(bo->htc->body_status != BS_TAKEN);
		if (bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
	}

	vbf_cleanup(bo);
	VCL_TaskLeave(ctx, bo->privs);
	VCL_TaskEnter(bo->privs);
	HTTP_Clone(bo->bereq, bo->bereq0);
	bo->vfp_filter_list = NULL;
	bo->err_reason = NULL;
	AN(bo->ws_bo);
	WS_Rollback(bo->ws, bo->ws_bo);
}

/*--------------------------------------------------------------------
 * Turn the beresp into an obj
 */

static int
vbf_beresp2obj(struct busyobj *bo)
{
	unsigned l, l2;
	const char *b;
	uint8_t *bp;
	struct vsb *vary = NULL;
	int varyl = 0;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	l = 0;

	/* Create Vary instructions */
	if (!(oc->flags & OC_F_PRIVATE)) {
		varyl = VRY_Create(bo, &vary);
		if (varyl > 0) {
			AN(vary);
			assert(varyl == VSB_len(vary));
			l += PRNDUP((intptr_t)varyl);
		} else if (varyl < 0) {
			/*
			 * Vary parse error
			 * Complain about it, and make this a pass.
			 */
			VSLb(bo->vsl, SLT_Error,
			    "Illegal 'Vary' header from backend, "
			    "making this a pass.");
			bo->uncacheable = 1;
			AZ(vary);
		} else
			/* No vary */
			AZ(vary);
	}

	l2 = http_EstimateWS(bo->beresp,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);
	l += l2;

	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	if (!vbf_allocobj(bo, l)) {
		if (vary != NULL)
			VSB_destroy(&vary);
		AZ(vary);
		return (VFP_Error(bo->vfc, "Could not get storage"));
	}

	if (vary != NULL) {
		AN(ObjSetAttr(bo->wrk, oc, OA_VARY, varyl, VSB_data(vary)));
		VSB_destroy(&vary);
	}

	AZ(ObjSetXID(bo->wrk, oc, bo->vsl->wid));

	/* for HTTP_Encode() VSLH call */
	bo->beresp->logtag = SLT_ObjMethod;

	/* Filter into object */
	bp = ObjSetAttr(bo->wrk, oc, OA_HEADERS, l2, NULL);
	AN(bp);
	HTTP_Encode(bo->beresp, bp, l2,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);

	if (http_GetHdr(bo->beresp, H_Last_Modified, &b))
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED, VTIM_parse(b)));
	else
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED,
		    floor(oc->t_origin)));

	return (0);
}

/*--------------------------------------------------------------------
 * Copy req->bereq and release req if no body
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_mkbereq(struct worker *wrk, struct busyobj *bo)
{
	const char *q;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(oc->boc->state == BOS_INVALID);
	AZ(bo->storage);

	HTTP_Setup(bo->bereq0, bo->ws, bo->vsl, SLT_BereqMethod);
	http_FilterReq(bo->bereq0, bo->req->http,
	    bo->uncacheable ? HTTPH_R_PASS : HTTPH_R_FETCH);

	if (bo->uncacheable)
		AZ(bo->stale_oc);
	else {
		http_ForceField(bo->bereq0, HTTP_HDR_METHOD, "GET");
		if (cache_param->http_gzip_support)
			http_ForceHeader(bo->bereq0, H_Accept_Encoding, "gzip");
	}
	http_ForceField(bo->bereq0, HTTP_HDR_PROTO, "HTTP/1.1");

	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND) &&
	    (bo->stale_oc->boc != NULL || ObjGetLen(wrk, bo->stale_oc) != 0)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		q = RFC2616_Strong_LM(NULL, wrk, bo->stale_oc);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-Modified-Since: %s", q);
		q = HTTP_GetHdrPack(bo->wrk, bo->stale_oc, H_ETag);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-None-Match: %s", q);
	}

	http_CopyHome(bo->bereq0);
	HTTP_Setup(bo->bereq, bo->ws, bo->vsl, SLT_BereqMethod);
	bo->ws_bo = WS_Snapshot(bo->ws);
	HTTP_Clone(bo->bereq, bo->bereq0);

	if (bo->req->req_body_status->avail == 0) {
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	} else if (bo->req->req_body_status == BS_CACHED) {
		AN(bo->req->body_oc);
		bo->bereq_body = bo->req->body_oc;
		HSH_Ref(bo->bereq_body);
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	}
	return (F_STP_STARTFETCH);
}

/*--------------------------------------------------------------------
 * Start a new VSL transaction and try again
 * Prepare the busyobj and fetch processors
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	if (bo->no_retry != NULL) {
		VSLb(bo->vsl, SLT_Error,
		    "Retry not possible, %s", bo->no_retry);
		return (F_STP_FAIL);
	}

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	/* VDI_Finish (via vbf_cleanup) must have been called before */
	assert(bo->director_state == DIR_S_NULL);

	/* reset other bo attributes - See VBO_GetBusyObj */
	bo->storage = NULL;
	bo->do_esi = 0;
	bo->do_stream = 1;
	bo->was_304 = 0;
	bo->err_code = 0;
	bo->err_reason = NULL;
	bo->connect_timeout = NAN;
	bo->first_byte_timeout = NAN;
	bo->between_bytes_timeout = NAN;
	if (bo->htc != NULL)
		bo->htc->doclose = SC_NULL;

	// XXX: BereqEnd + BereqAcct ?
	VSL_ChgId(bo->vsl, "bereq", "retry", VXID_Get(wrk, VSL_BACKENDMARKER));
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);
	http_VSL_log(bo->bereq);

	return (F_STP_STARTFETCH);
}

/*--------------------------------------------------------------------
 * 304 setup logic
 */

static int
vbf_304_logic(struct busyobj *bo)
{
	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGCE)) {
			/*
			 * If a VFP changed C-E in the stored
			 * object, then don't overwrite C-E from
			 * the IMS fetch, and we must weaken any
			 * new ETag we get.
			 */
			RFC2616_Weaken_Etag(bo->beresp);
		}
		http_Unset(bo->beresp, H_Content_Encoding);
		http_Unset(bo->beresp, H_Content_Length);
		HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
		assert(http_IsStatus(bo->beresp, 200));
		bo->was_304 = 1;
	} else if (!bo->uncacheable) {
		/*
		 * Backend sent unallowed 304
		 */
		VSLb(bo->vsl, SLT_Error,
		    "304 response but not conditional fetch");
		bo->htc->doclose = SC_RX_BAD;
		vbf_cleanup(bo);
		return (-1);
	}
	return (1);
}

/*--------------------------------------------------------------------
 * Setup bereq from bereq0, run vcl_backend_fetch
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	vtim_real now;
	unsigned handling;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->storage);
	bo->storage = bo->uncacheable ? stv_transient : STV_next();

	if (bo->retries > 0)
		http_Unset(bo->bereq, H_X_Varnish);

	http_PrintfHeader(bo->bereq, "X-Varnish: %ju", VXID(bo->vsl->wid));

	if (bo->bereq_body == NULL && bo->req == NULL)
		http_Unset(bo->bereq, H_Content_Length);

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL)
		return (F_STP_FAIL);

	assert (wrk->vpi->handling == VCL_RET_FETCH ||
	    wrk->vpi->handling == VCL_RET_ERROR);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(oc->boc->state <= BOS_REQ_DONE);

	AZ(bo->htc);

	VFP_Setup(bo->vfc, wrk);
	bo->vfc->oc = oc;
	bo->vfc->resp = bo->beresp;
	bo->vfc->req = bo->bereq;

	if (wrk->vpi->handling == VCL_RET_ERROR)
		return (F_STP_ERROR);

	VSLb_ts_busyobj(bo, "Fetch", W_TIM_real(wrk));
	i = VDI_GetHdr(bo);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	bo->t_resp = now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (bo->htc != NULL && bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/* What does RFC2616 think about TTL ? */
	RFC2616_Ttl(bo, now,
	    &oc->t_origin,
	    &oc->ttl,
	    &oc->grace,
	    &oc->keep);

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304) && vbf_304_logic(bo) < 0)
		return (F_STP_ERROR);

	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->bereq, H_Connection, "close", NULL))
		bo->htc->doclose = SC_REQ_CLOSE;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->beresp, H_Connection, "close", NULL))
		bo->htc->doclose = SC_RESP_CLOSE;

	if (VRG_CheckBo(bo) < 0) {
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo);
		return (F_STP_ERROR);
	}

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL ||
	    wrk->vpi->handling == VCL_RET_ERROR) {
		/* do not count deliberately ending the backend connection as
		 * fetch failure
		 */
		handling = wrk->vpi->handling;
		if (bo->htc)
			bo->htc->doclose = SC_RESP_CLOSE;
		vbf_cleanup(bo);
		wrk->vpi->handling = handling;

		if (wrk->vpi->handling == VCL_RET_ERROR)
			return (F_STP_ERROR);
		else
			return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		if (bo->htc && bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		vbf_cleanup(bo);

		if (bo->retries++ < bo->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	VSLb_ts_busyobj(bo, "Process", W_TIM_real(wrk));
	assert(oc->boc->state <= BOS_REQ_DONE);
	if (oc->boc->state != BOS_REQ_DONE) {
		bo->req = NULL;
		ObjSetState(wrk, oc, BOS_REQ_DONE);
	}

	if (bo->do_esi)
		bo->do_stream = 0;
	if (wrk->vpi->handling == VCL_RET_PASS) {
		oc->flags |= OC_F_HFP;
		bo->uncacheable = 1;
		wrk->vpi->handling = VCL_RET_DELIVER;
	}
	if (!bo->uncacheable || !bo->do_stream)
		oc->boc->transit_buffer = 0;
	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}

/*--------------------------------------------------------------------
 */

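/* Pull the response body through the fetch processor (VFP) chain:
 * keep asking for storage and sucking from the filters until the
 * pipeline reports end-of-data or an error. */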
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	ssize_t est;
	struct vfp_ctx *vfc;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(vfc->vfp_nxt);

	est = bo->htc->content_length;
	if (est < 0)
		est = 0;

	do {
		if (oc->flags & OC_F_CANCEL) {
			/*
			 * A pass object and delivery was terminated
			 * We don't fail the fetch, in order for HitMiss
			 * objects to be created.
			 */
			AN(oc->flags & OC_F_HFM);
			VSLb(wrk->vsl, SLT_Debug,
			    "Fetch: Pass delivery abandoned");
			bo->htc->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		l = est;
		assert(l >= 0);
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) {
			bo->htc->doclose = SC_RX_BODY;
			break;
		}

		AZ(vfc->failed);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l >= 0 && vfps != VFP_ERROR) {
			VFP_Extend(vfc, l, vfps);
			if (est >= l)
				est -= l;
			else
				est = 0;
		}
	} while (vfps == VFP_OK);

	if (vfc->failed) {
		(void)VFP_Error(vfc, "Fetch pipeline failed to process");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		if (!bo->do_stream) {
			assert(oc->boc->state < BOS_STREAM);
			// XXX: doclose = ?
			return (F_STP_ERROR);
		} else {
			wrk->stats->fetch_failed++;
			return (F_STP_FAIL);
		}
	}

	return (F_STP_FETCHEND);
}

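/* Set up the VFP filter chain for the response body, create the object
 * from beresp and, when streaming, unbusy it before the body fetch
 * starts. */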
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	struct vrt_ctx ctx[1];
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	if (bo->htc == NULL) {
		(void)VFP_Error(bo->vfc, "No backend connection (rollback?)");
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	/* No body -> done */
	if (bo->htc->body_status == BS_NONE || bo->htc->content_length == 0) {
		http_Unset(bo->beresp, H_Content_Encoding);
		bo->do_gzip = bo->do_gunzip = 0;
		bo->do_stream = 0;
		bo->vfp_filter_list = "";
	} else if (bo->vfp_filter_list == NULL) {
		bo->vfp_filter_list = VBF_Get_Filter_List(bo);
	}

	if (bo->vfp_filter_list == NULL ||
	    VCL_StackVFP(bo->vfc, bo->vcl, bo->vfp_filter_list)) {
		(bo)->htc->doclose = SC_OVERLOAD;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (oc->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	oc->boc->fetched_so_far = 0;

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);

	if (VFP_Open(ctx, bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch pipeline failed to open");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

#define OBJ_FLAG(U, l, v) \
	if (bo->vfc->obj_flags & OF_##U) \
		ObjSetFlag(bo->wrk, oc, OF_##U, 1);
#include "tbl/obj_attr.h"

	if (!(oc->flags & OC_F_HFM) &&
	    http_IsStatus(bo->beresp, 200) && (
	    RFC2616_Strong_LM(bo->beresp, NULL, NULL) != NULL ||
	    http_GetHdr(bo->beresp, H_ETag, NULL)))
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 1);

	assert(oc->boc->refcount >= 1);

	assert(oc->boc->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status->nbr, bo->htc->body_status->name,
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		return (F_STP_FETCHBODY);
	}
	AZ(bo->vfc->failed);
	return (F_STP_FETCHEND);
}

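/* Finish the fetch: record the final object length, unbusy the object
 * if it was not streamed, mark it BOS_FINISHED and supersede the stale
 * object it was fetched to replace. */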
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
{

	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->vfc->failed);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	vbf_cleanup(bo);

	AZ(ObjSetU64(wrk, oc, OA_LEN, oc->boc->fetched_so_far));

	if (bo->do_stream)
		assert(oc->boc->state == BOS_STREAM);
	else {
		assert(oc->boc->state == BOS_REQ_DONE);
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
	}

	ObjSetState(wrk, oc, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->stale_oc != NULL) {
		VSL(SLT_ExpKill, NO_VXID, "VBF_Superseded x=%ju n=%ju",
		    VXID(ObjGetXID(wrk, bo->stale_oc)),
		    VXID(ObjGetXID(wrk, bo->fetch_objcore)));
		HSH_Replace(bo->stale_oc, bo->fetch_objcore);
	}
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

struct vbf_objiter_priv {
	unsigned magic;
#define VBF_OBITER_PRIV_MAGIC 0x3c272a17
	struct busyobj *bo;
	// not yet allocated
	ssize_t l;
	// current allocation
	uint8_t *p;
	ssize_t pl;
};

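/* ObjIterate() callback: copy the stale (template) object body into
 * storage allocated for the new object. */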
static int v_matchproto_(objiterate_f)
vbf_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
	struct vbf_objiter_priv *vop;
	ssize_t l;
	const uint8_t *ps = ptr;

	CAST_OBJ_NOTNULL(vop, priv, VBF_OBITER_PRIV_MAGIC);
	CHECK_OBJ_NOTNULL(vop->bo, BUSYOBJ_MAGIC);

	flush &= OBJ_ITER_END;

	while (len > 0) {
		if (vop->pl == 0) {
			vop->p = NULL;
			AN(vop->l);
			vop->pl = vop->l;
			if (VFP_GetStorage(vop->bo->vfc, &vop->pl, &vop->p)
			    != VFP_OK)
				return (1);
			if (vop->pl < vop->l)
				vop->l -= vop->pl;
			else
				vop->l = 0;
		}
		AN(vop->pl);
		AN(vop->p);

		l = vmin(vop->pl, len);
		memcpy(vop->p, ps, l);
		VFP_Extend(vop->bo->vfc, l,
		    flush && l == len ? VFP_END : VFP_OK);
		ps += l;
		vop->p += l;
		len -= l;
		vop->pl -= l;
	}
	if (flush)
		AZ(vop->l);
	return (0);
}

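/* The backend answered 304: build the new object from the headers
 * already merged into beresp and the attributes and body copied from
 * the stale object. */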
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	struct boc *stale_boc;
	enum boc_state_e stale_state;
	struct objcore *oc, *stale_oc;
	struct vbf_objiter_priv vop[1];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stale_oc = bo->stale_oc;
	CHECK_OBJ_NOTNULL(stale_oc, OBJCORE_MAGIC);

	stale_boc = HSH_RefBoc(stale_oc);
	CHECK_OBJ_ORNULL(stale_boc, BOC_MAGIC);
	if (stale_boc) {
		/* Wait for the stale object to become fully fetched, so
		 * that we can catch fetch errors, before we unbusy the
		 * new object. This serves two purposes. First it helps
		 * with request coalescing, and stops long chains of
		 * IMS-updated short-TTL objects all streaming from a
		 * single slow body fetch. Second it makes sure that all
		 * the object attributes are complete when we copy them
		 * (this would be an issue for ie OA_GZIPBITS). */
		VSLb(bo->vsl, SLT_Notice,
		    "vsl: Conditional fetch wait for streaming object");
		ObjWaitState(stale_oc, BOS_FINISHED);
		stale_state = stale_boc->state;
		HSH_DerefBoc(bo->wrk, stale_oc);
		stale_boc = NULL;
		if (stale_state != BOS_FINISHED) {
			assert(stale_state == BOS_FAILED);
			AN(stale_oc->flags & OC_F_FAILED);
		}
	}

	AZ(stale_boc);
	if (stale_oc->flags & OC_F_FAILED) {
		(void)VFP_Error(bo->vfc, "Template object failed");
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	if (vbf_beresp2obj(bo)) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	if (ObjHasAttr(bo->wrk, stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_FLAGS));
	if (oc->flags & OC_F_HFM)
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 0);
	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	INIT_OBJ(vop, VBF_OBITER_PRIV_MAGIC);
	vop->bo = bo;
	vop->l = ObjGetLen(bo->wrk, stale_oc);
	if (ObjIterate(wrk, stale_oc, vop, vbf_objiterate, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}
	return (F_STP_FETCHEND);
}

/*--------------------------------------------------------------------
 * Create synth object
 *
 * replaces a stale object unless
 * - abandoning the bereq or
 * - leaving vcl_backend_error with return (deliver) and beresp.ttl == 0s or
 * - there is a waitinglist on this object because in this case the default ttl
 *   would be 1s, so we might be looking at the same case as the previous
 *
 * We do want the stale replacement to avoid an object pileup with short ttl and
 * long grace/keep, yet there could exist cases where a cache object is
 * deliberately created to momentarily override a stale object.
 *
 * If this case exists, we should add a vcl veto (e.g. beresp.replace_stale with
 * default true)
 */

static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	vtim_real now;
	uint8_t *ptr;
	struct vsb *synth_body;
	struct objcore *stale, *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	if (wrk->vpi->handling != VCL_RET_ERROR)
		wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	if (oc->stobj->stevedore != NULL) {
		oc->boc->fetched_so_far = 0;
		ObjFreeObj(bo->wrk, oc);
	}

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	if (bo->err_code > 0)
		http_PutResponse(bo->beresp, "HTTP/1.1", bo->err_code,
		    bo->err_reason);
	else
		http_PutResponse(bo->beresp, "HTTP/1.1", 503,
		    "Backend fetch failed");

	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	stale = bo->stale_oc;
	oc->t_origin = now;
	if (!VTAILQ_EMPTY(&oc->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		oc->ttl = 1;
		oc->grace = 5;
		oc->keep = 5;
		stale = NULL;
	} else {
		oc->ttl = 0;
		oc->grace = 0;
		oc->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->vpi->handling == VCL_RET_ABANDON || wrk->vpi->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < bo->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == oc);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	oc->boc->transit_buffer = 0;

	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK) {
			VSB_destroy(&synth_body);
			return (F_STP_FAIL);
		}
		l = vmin(l, ll);
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l, l == ll ? VFP_END : VFP_OK);
		ll -= l;
		o += l;
	}
	assert(o == VSB_len(synth_body));
	AZ(ObjSetU64(wrk, oc, OA_LEN, o));
	VSB_destroy(&synth_body);
	ObjSetState(wrk, oc, BOS_PREP_STREAM);
	HSH_Unbusy(wrk, oc);
	if (stale != NULL && oc->ttl > 0)
		HSH_Kill(stale);
	ObjSetState(wrk, oc, BOS_FINISHED);
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

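/* The fetch failed: fail the objcore so any waiters are released and
 * mark the busyobj state BOS_FAILED. */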
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fail(struct worker *wrk, struct busyobj *bo)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(oc->boc->state < BOS_FINISHED);
	HSH_Fail(oc);
	if (!(oc->flags & OC_F_BUSY))
		HSH_Kill(oc);
	ObjSetState(wrk, oc, BOS_FAILED);
	return (F_STP_DONE);
}

/*--------------------------------------------------------------------
 */

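/* Terminal step: never reached, the state machine in vbf_fetch_thread
 * stops as soon as a step returns F_STP_DONE. */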
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_done(struct worker *wrk, struct busyobj *bo)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	WRONG("Just plain wrong");
	NEEDLESS(return (F_STP_DONE));
}

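/* Backend fetch task: drive the fetch state machine from MKBEREQ until
 * a step returns F_STP_DONE, then log the result and drop all the
 * references held by the busyobj. */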
static void v_matchproto_(task_func_t)
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct vrt_ctx ctx[1];
	struct busyobj *bo;
	struct objcore *oc;
	const struct fetch_step *stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

#if 0
	if (bo->stale_oc != NULL) {
		CHECK_OBJ_NOTNULL(bo->stale_oc, OBJCORE_MAGIC);
		/* We don't want the oc/stevedore ops in fetching thread */
		if (!ObjCheckFlag(wrk, bo->stale_oc, OF_IMSCAND))
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
	}
#endif

	VCL_TaskEnter(bo->privs);
	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(oc->boc->refcount >= 1);
		if (oc->boc->state < BOS_REQ_DONE)
			AN(bo->req);
		else
			AZ(bo->req);
		AN(stp);
		AN(stp->name);
		AN(stp->func);
		stp = stp->func(wrk, bo);
	}

	assert(bo->director_state == DIR_S_NULL);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);
	VCL_TaskLeave(ctx, bo->privs);
	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);
	// cannot make assumptions about the number of references here #3434
	if (bo->bereq_body != NULL)
		(void) HSH_DerefObjCore(bo->wrk, &bo->bereq_body, 0);

	if (oc->boc->state == BOS_FINISHED) {
		AZ(oc->flags & OC_F_FAILED);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, oc));
	}
	// AZ(oc->boc);	// XXX

	if (bo->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);

	wrk->vsl = NULL;
	HSH_DerefBoc(wrk, oc);
	SES_Rel(bo->sp);
	VBO_ReleaseBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}

/*--------------------------------------------------------------------
 */

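/* Schedule a backend fetch for oc on a separate worker task and wait
 * until the fetch either reaches BOS_REQ_DONE (background fetch) or
 * starts streaming, so the client side can continue. */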
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	enum task_prio prio;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AN(bo->vcl);

	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		prio = TASK_QUEUE_BO;
		how = "pass";
		bo->uncacheable = 1;
		break;
	case VBF_NORMAL:
		prio = TASK_QUEUE_BO;
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		prio = TASK_QUEUE_BG;
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

#define REQ_BEREQ_FLAG(l, r, w, d) bo->l = req->l;
#include "tbl/req_bereq_flags.h"

	VSLb(bo->vsl, SLT_Begin, "bereq %ju %s", VXID(req->vsl->wid), how);
	VSLbs(bo->vsl, SLT_VCL_use, TOSTRAND(VCL_Name(bo->vcl)));
	VSLb(req->vsl, SLT_Link, "bereq %ju %s", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->sp = req->sp;
	SES_Ref(bo->sp);

	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task->priv = bo;
	bo->fetch_task->func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, bo->fetch_task, prio)) {
		wrk->stats->bgfetch_no_thread++;
		VSLb(bo->vsl, SLT_FetchError,
		    "No thread available for bgfetch");
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		THR_SetBusyobj(NULL);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		THR_SetBusyobj(NULL);
		bo = NULL;	/* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
}