varnish-cache/bin/varnishd/cache/cache_fetch_proc.c

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "vcli_serve.h"

static unsigned fetchfrag;

/*--------------------------------------------------------------------
 * We want to issue the first error we encounter on fetching and
 * suppress the rest. This function does that.
 *
 * Other code is allowed to look at busyobj->fetch_failed to bail out
 *
 * For convenience, always return VFP_ERROR
 */

enum vfp_status
VFP_Error(struct vfp_ctx *vc, const char *fmt, ...)
{
	va_list ap;

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	if (!vc->failed) {
		va_start(ap, fmt);
		VSLbv(vc->wrk->vsl, SLT_FetchError, fmt, ap);
		va_end(ap);
		vc->failed = 1;
	}
	return (VFP_ERROR);
}
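
/*
 * Usage sketch (illustrative only, hypothetical filter names): a pull
 * callback reports a failure through VFP_Error() and passes the returned
 * VFP_ERROR straight up:
 *
 *	if (input_is_corrupt)			// hypothetical condition
 *		return (VFP_Error(vc, "myfilter: corrupt input"));
 */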

/*--------------------------------------------------------------------
 * Fetch Storage to put object into.
 *
 */

enum vfp_status
VFP_GetStorage(struct vfp_ctx *vc, ssize_t *sz, uint8_t **ptr)
{

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	AN(sz);
	assert(*sz >= 0);
	AN(ptr);

	if (fetchfrag > 0)
		*sz = fetchfrag;

	if (!ObjGetSpace(vc->wrk, vc->oc, sz, ptr)) {
		*sz = 0;
		*ptr = NULL;
		return (VFP_Error(vc, "Could not get storage"));
	}
	assert(*sz > 0);
	AN(*ptr);
	return (VFP_OK);
}

void
VFP_Extend(const struct vfp_ctx *vc, ssize_t sz, enum vfp_status flg)
{
	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);

	ObjExtend(vc->wrk, vc->oc, sz, flg == VFP_END ? 1 : 0);
}
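
/*
 * Sketch of the calling pattern (simplified and illustrative only, local
 * names are hypothetical): storage is requested, the VFP stack fills it
 * via VFP_Suck(), and the object is extended by whatever was produced:
 *
 *	do {
 *		l = 0;			// 0 = let the stevedore decide
 *		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK)
 *			break;
 *		vfps = VFP_Suck(vfc, ptr, &l);
 *		if (l >= 0 && vfps != VFP_ERROR)
 *			VFP_Extend(vfc, l, vfps);
 *	} while (vfps == VFP_OK);
 */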

/**********************************************************************
 */

void
VFP_Setup(struct vfp_ctx *vc, struct worker *wrk)
{

	INIT_OBJ(vc, VFP_CTX_MAGIC);
	VTAILQ_INIT(&vc->vfp);
	vc->wrk = wrk;
}

/**********************************************************************
 * Returns the number of bytes processed by the lowest VFP in the stack
 */

uint64_t
VFP_Close(struct vfp_ctx *vc)
{
	struct vfp_entry *vfe, *tmp;
	uint64_t rv = 0;

	VTAILQ_FOREACH_SAFE(vfe, &vc->vfp, list, tmp) {
		if (vfe->vfp->fini != NULL)
			vfe->vfp->fini(vc, vfe);
		rv = vfe->bytes_out;
		VSLb(vc->wrk->vsl, SLT_VfpAcct, "%s %ju %ju", vfe->vfp->name,
		    (uintmax_t)vfe->calls, (uintmax_t)rv);
		VTAILQ_REMOVE(&vc->vfp, vfe, list);
	}
	return (rv);
}

int
VFP_Open(VRT_CTX, struct vfp_ctx *vc)
{
	struct vfp_entry *vfe;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vc->resp, HTTP_MAGIC);
	CHECK_OBJ_NOTNULL(vc->wrk, WORKER_MAGIC);
	AN(vc->wrk->vsl);

	VTAILQ_FOREACH_REVERSE(vfe, &vc->vfp, vfp_entry_s, list) {
		if (vfe->vfp->init == NULL)
			continue;
		if (DO_DEBUG(DBG_PROCESSORS))
			VSLb(vc->wrk->vsl, SLT_Debug, "VFP_Open(%s)",
			    vfe->vfp->name);
		vfe->closed = vfe->vfp->init(ctx, vc, vfe);
		if (vfe->closed != VFP_OK && vfe->closed != VFP_NULL) {
			(void)VFP_Error(vc, "Fetch filter %s failed to open",
			    vfe->vfp->name);
			(void)VFP_Close(vc);
			return (-1);
		}
	}

	return (0);
}
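
/*
 * Lifecycle sketch (illustrative only, names are hypothetical): a caller
 * sets up the context, pushes its filters, opens the stack, runs the
 * GetStorage/Suck/Extend loop, and closes the stack when done:
 *
 *	VFP_Setup(vfc, wrk);
 *	(void)VFP_Push(vfc, &my_vfp);		// hypothetical filter
 *	if (VFP_Open(ctx, vfc))
 *		return (-1);			// VFP_Error() already logged
 *	... VFP_GetStorage() / VFP_Suck() / VFP_Extend() loop ...
 *	bytes = VFP_Close(vfc);
 */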

/**********************************************************************
 * Suck data up from lower levels.
 * Once a layer returns non-VFP_OK, clean it up and produce the same
 * return value for any subsequent calls.
 */

enum vfp_status
VFP_Suck(struct vfp_ctx *vc, void *p, ssize_t *lp)
{
	enum vfp_status vp;
	struct vfp_entry *vfe, *vfe_prev;
	const char *prev_name = "<storage>";
	ssize_t limit;

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	AN(p);
	AN(lp);
	limit = *lp;
	vfe = vc->vfp_nxt;
	CHECK_OBJ_NOTNULL(vfe, VFP_ENTRY_MAGIC);
	vc->vfp_nxt = VTAILQ_NEXT(vfe, list);

	vfe_prev = VTAILQ_PREV(vfe, vfp_entry_s, list);
	if (vfe_prev != NULL)
		prev_name = vfe_prev->vfp->name;

	if (vfe->closed == VFP_NULL) {
		/* Layer asked to be bypassed when opened */
		vp = VFP_Suck(vc, p, lp);
		VFP_DEBUG(vc, "bypassing %s vp=%d", vfe->vfp->name, vp);
	} else if (vfe->closed == VFP_OK) {
		vp = vfe->vfp->pull(vc, vfe, p, lp);
		VFP_DEBUG(vc, "%s pulled %zdB/%zdB from %s vp=%d",
		    prev_name, *lp, limit, vfe->vfp->name, vp);
		if (vp != VFP_OK && vp != VFP_END && vp != VFP_ERROR)
			vp = VFP_Error(vc, "Fetch filter %s returned %d",
			    vfe->vfp->name, vp);
		else
			vfe->bytes_out += *lp;
		vfe->closed = vp;
		vfe->calls++;
	} else {
		/* Already closed filter */
		*lp = 0;
		vp = vfe->closed;
		VFP_DEBUG(vc, "ignoring %s vp=%d", vfe->vfp->name, vp);
	}
	vc->vfp_nxt = vfe;
	assert(vp != VFP_NULL);
	return (vp);
}
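
/*
 * Sketch of a pull callback built on VFP_Suck() (illustrative only, the
 * names are hypothetical): a byte-for-byte transforming filter asks the
 * layer below to fill the buffer, then rewrites it in place:
 *
 *	static enum vfp_status v_matchproto_(vfp_pull_f)
 *	xform_pull(struct vfp_ctx *vc, struct vfp_entry *vfe, void *p,
 *	    ssize_t *lp)
 *	{
 *		enum vfp_status vp;
 *
 *		vp = VFP_Suck(vc, p, lp);
 *		if (vp != VFP_ERROR && *lp > 0)
 *			transform_bytes(p, *lp);	// hypothetical helper
 *		return (vp);
 *	}
 */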

/*--------------------------------------------------------------------
 */

struct vfp_entry *
VFP_Push(struct vfp_ctx *vc, const struct vfp *vfp)
{
	struct vfp_entry *vfe;

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vc->resp, HTTP_MAGIC);

	vfe = WS_Alloc(vc->resp->ws, sizeof *vfe);
	if (vfe == NULL) {
		(void)VFP_Error(vc, "Workspace overflow");
		return (NULL);
	}

	INIT_OBJ(vfe, VFP_ENTRY_MAGIC);
	vfe->vfp = vfp;
	vfe->closed = VFP_OK;
	VTAILQ_INSERT_HEAD(&vc->vfp, vfe, list);
	vc->vfp_nxt = vfe;
	return (vfe);
}
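
/*
 * Usage sketch (illustrative only): each push lands on top of the stack
 * (VTAILQ_INSERT_HEAD), so the most recently pushed filter is the one
 * VFP_Suck() drives first. Per-filter state can go in the entry's priv
 * fields:
 *
 *	vfe = VFP_Push(vc, &my_vfp);		// hypothetical filter
 *	if (vfe == NULL)
 *		return (-1);			// VFP_Error() already logged
 *	vfe->priv1 = my_state;			// hypothetical state
 */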

/*--------------------------------------------------------------------
 * Debugging aids
 */

static void v_matchproto_(cli_func_t)
debug_fragfetch(struct cli *cli, const char * const *av, void *priv)
{
	(void)priv;
	(void)cli;
	fetchfrag = strtoul(av[2], NULL, 0);
}

static struct cli_proto debug_cmds[] = {
	{ CLICMD_DEBUG_FRAGFETCH, "d", debug_fragfetch },
	{ NULL }
};
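
/*
 * The debug.fragfetch CLI command is a debugging aid that sets fetchfrag,
 * making VFP_GetStorage() request storage in fragments of that many bytes,
 * which is handy for exercising boundary conditions in test cases, e.g.
 * (illustrative invocation):
 *
 *	varnishadm debug.fragfetch 32
 */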

/*--------------------------------------------------------------------
 *
 */

void
VFP_Init(void)
{

	CLI_AddFuncs(debug_cmds);
}