varnish-cache/bin/varnishd/cache/cache_filter.h

/*-
 * Copyright (c) 2013-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

struct req;
struct vfp_entry;
struct vfp_ctx;
struct vdp_ctx;
struct vdp_entry;

/* Fetch processors --------------------------------------------------*/

#define VFP_DEBUG(ctx, fmt, ...)				\
	do {							\
		if (!DO_DEBUG(DBG_PROCESSORS))			\
			break;					\
		VSLb((ctx)->wrk->vsl, SLT_Debug, "VFP:%s:%d: " fmt, \
		    __func__, __LINE__, __VA_ARGS__);		\
	} while (0)

enum vfp_status {
	VFP_ERROR = -1,
	VFP_OK = 0,
	VFP_END = 1,
	VFP_NULL = 2,	// signal bypass, never returned by VFP_Suck()
};

typedef enum vfp_status vfp_init_f(VRT_CTX, struct vfp_ctx *,
    struct vfp_entry *);
typedef enum vfp_status
    vfp_pull_f(struct vfp_ctx *, struct vfp_entry *, void *ptr, ssize_t *len);
typedef void vfp_fini_f(struct vfp_ctx *, struct vfp_entry *);

struct vfp {
	const char		*name;
	vfp_init_f		*init;
	vfp_pull_f		*pull;
	vfp_fini_f		*fini;
	const void		*priv1;
};

struct vfp_entry {
	unsigned		magic;
#define VFP_ENTRY_MAGIC		0xbe32a027
	enum vfp_status		closed;
	const struct vfp	*vfp;
	void			*priv1;	// XXX ambiguous with priv1 in struct vfp
	ssize_t			priv2;
	VTAILQ_ENTRY(vfp_entry)	list;
	uint64_t		calls;
	uint64_t		bytes_out;
};

/*--------------------------------------------------------------------
 * VFP filter state
 */

VTAILQ_HEAD(vfp_entry_s, vfp_entry);

struct vfp_ctx {
	unsigned		magic;
#define VFP_CTX_MAGIC		0x61d9d3e5
	int			failed;
	struct http		*req;
	struct http		*resp;
	struct worker		*wrk;
	struct objcore		*oc;	// Only first filter, if at all

	struct vfp_entry_s	vfp;
	struct vfp_entry	*vfp_nxt;
	unsigned		obj_flags;
};

enum vfp_status VFP_Suck(struct vfp_ctx *, void *p, ssize_t *lp);
enum vfp_status VFP_Error(struct vfp_ctx *, const char *fmt, ...)
    v_printflike_(2, 3);

void v_deprecated_ VRT_AddVFP(VRT_CTX, const struct vfp *);
void v_deprecated_ VRT_RemoveVFP(VRT_CTX, const struct vfp *);

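/*
 * Illustrative sketch, not part of the API above: a minimal pass-through
 * VFP built only from the declarations in this header.  The "example_vfp*"
 * names are made up; a real filter would be registered via VRT_AddFilter()
 * (declared at the end of this file) or the deprecated VRT_AddVFP().
 */
#if 0
static enum vfp_status
example_vfp_pull(struct vfp_ctx *vc, struct vfp_entry *vfe, void *p,
    ssize_t *lp)
{
	(void)vfe;
	/* Pull the next chunk from the preceding filter (or storage);
	 * a transforming filter would rewrite the *lp bytes at p here. */
	return (VFP_Suck(vc, p, lp));
}

static const struct vfp example_vfp = {
	.name = "example",
	.pull = example_vfp_pull,
};
#endif
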
/* Deliver processors ------------------------------------------------*/

enum vdp_action {
	VDP_NULL,	/* Input buffer valid after call */
	VDP_FLUSH,	/* Input buffer will be invalidated */
	VDP_END,	/* Last buffer or after, implies VDP_FLUSH */
};

typedef int vdp_init_f(VRT_CTX, struct vdp_ctx *, void **priv);
/*
 * Return value:
 *	negative: Error - abandon delivery
 *	zero:     OK
 *	positive: Don't push this VDP anyway
 */

typedef int vdp_fini_f(struct vdp_ctx *, void **priv);
typedef int vdp_bytes_f(struct vdp_ctx *, enum vdp_action, void **priv,
    const void *ptr, ssize_t len);

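/*
 * Illustrative sketch, not part of the API above: a pass-through VDP using
 * the byte-oriented interface.  The "example_vdp*" names are made up;
 * VDP_bytes() is declared further down in this header.
 */
#if 0
static int
example_vdp_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	(void)priv;
	/* A transforming filter would inspect or rewrite the bytes and
	 * possibly call VDP_bytes() several times; a pass-through filter
	 * simply hands them to the next processor in the chain. */
	return (VDP_bytes(vdx, act, ptr, len));
}

static const struct vdp example_vdp = {
	.name = "example",
	.bytes = example_vdp_bytes,
};
#endif
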
/*
 * ============================================================
 * vdpio io-vector interface
 */
typedef int vdpio_init_f(VRT_CTX, struct vdp_ctx *, void **priv, int capacity);
/*
 * The vdpio_init_f() functions are called front (object iterator) to back
 * (consumer).
 *
 * Each init function returns the minimum number of io vectors (vscarab
 * capacity) that it requires the next filter to accept. This capacity is
 * passed to the next init function such that it can allocate sufficient
 * space to fulfil the requirement of the previous filter.
 *
 * Return values:
 *	<  0 : Error
 *	== 0 : NOOP, do not push this filter
 *	>= 1 : capacity requirement
 *
 * The typedef is shared with upgrade (io_upgrade).
 */

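/*
 * Illustrative sketch, not part of the API above: an io_init for a
 * hypothetical pass-through filter.  Such a filter does not change the
 * io-vector requirement, so it forwards the capacity it was handed; a
 * filter that fans one input viov out into several would return a larger
 * number instead.  The name is made up for the example.
 */
#if 0
static int
example_vdp_io_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity)
{
	(void)ctx;
	(void)vdc;
	(void)priv;
	assert(capacity >= 1);
	return (capacity);
}
#endif
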
typedef int vdpio_lease_f(struct vdp_ctx *, struct vdp_entry *, struct vscarab *scarab);
/*
 * vdpio_lease_f() returns leases provided by this filter layer in the vscarab
 * supplied by the caller.
 *
 * Called via vdpio_pull(): the last filter is called first by delivery. Each
 * filter calls the previous layer for leases. The first filter calls storage.
 *
 * Return values are as for ObjVAIlease().
 *
 * Other notable differences to vdp_bytes_f:
 * - responsible for updating (struct vdp_entry).bytes_in and .calls
 */

typedef void vdpio_fini_f(struct vdp_ctx *, void **priv);

struct vdp {
	const char		*name;
	vdp_init_f		*init;
	vdp_bytes_f		*bytes;
	vdp_fini_f		*fini;
	const void		*priv1;

	vdpio_init_f		*io_init;
	vdpio_init_f		*io_upgrade;
	vdpio_lease_f		*io_lease;
	vdpio_fini_f		*io_fini;
};

struct vdp_entry {
	unsigned		magic;
#define VDP_ENTRY_MAGIC		0x353eb781
	enum vdp_action		end;	// VDP_NULL or VDP_END
	const struct vdp	*vdp;
	void			*priv;
	VTAILQ_ENTRY(vdp_entry)	list;
	uint64_t		calls;
	uint64_t		bytes_in;
};

VTAILQ_HEAD(vdp_entry_s, vdp_entry);

struct vdp_ctx {
	unsigned		magic;
#define VDP_CTX_MAGIC		0xee501df7
	int			retval;		// vdpio: error or capacity
	uint64_t		bytes_done;	// not used with vdpio
	struct vdp_entry_s	vdp;
	struct vdp_entry	*nxt;		// not needed for vdpio
	struct worker		*wrk;
	struct vsl_log		*vsl;
	// NULL'ed after the first filter has been pushed
	struct objcore		*oc;
	// NULL'ed for delivery
	struct http		*hp;
	intmax_t		*clen;
	// only for vdpio
	vai_hdl			vai_hdl;
	struct vscaret		*scaret;
};

int VDP_bytes(struct vdp_ctx *, enum vdp_action act, const void *, ssize_t);

/*
 * vdpe == NULL: get leases from the last layer
 * vdpe != NULL: get leases from the previous layer or storage
 *
 * In contrast to VDP_bytes(), vdpio calls happen back (delivery) to front
 * (storage).
 *
 * Ends up in a tail call to the previous layer to save stack space.
 */
static inline int
vdpio_pull(struct vdp_ctx *vdc, struct vdp_entry *vdpe, struct vscarab *scarab)
{

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	if (vdpe == NULL)
		vdpe = VTAILQ_LAST(&vdc->vdp, vdp_entry_s);
	else {
		CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
		vdpe = VTAILQ_PREV(vdpe, vdp_entry_s, list);
	}

	if (vdpe != NULL)
		return (vdpe->vdp->io_lease(vdc, vdpe, scarab));
	else
		return (ObjVAIlease(vdc->wrk, vdc->vai_hdl, scarab));
}

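/*
 * Illustrative sketch, not part of the API above: an io_lease for a
 * hypothetical pass-through filter.  It pulls leases from the previous
 * layer with vdpio_pull() and, as required by the vdpio_lease_f contract,
 * accounts for .calls and .bytes_in itself.  Names are made up, and the
 * accounting assumes the scarab was empty on entry.
 */
#if 0
static int
example_vdp_io_lease(struct vdp_ctx *vdc, struct vdp_entry *vdpe,
    struct vscarab *scarab)
{
	struct viov *v;
	int r;

	r = vdpio_pull(vdc, vdpe, scarab);
	/* A transforming filter would rewrite the io vectors here,
	 * taking over or returning their leases as appropriate. */
	vdpe->calls++;
	VSCARAB_FOREACH(v, scarab)
		vdpe->bytes_in += v->iov.iov_len;
	return (r);
}
#endif
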
uint64_t VDPIO_Close1(struct vdp_ctx *, struct vdp_entry *vdpe);

/*
 * ============================================================
 * VDPIO helpers
 */

/*
 * l bytes have been written to buf. save these to out and checkpoint buf for
 * the remaining free space
 */
static inline void
iovec_collect(struct iovec *buf, struct iovec *out, size_t l)
{
	if (out->iov_base == NULL)
		out->iov_base = buf->iov_base;
	assert((char *)out->iov_base + out->iov_len == buf->iov_base);
	out->iov_len += l;
	buf->iov_base = (char *)buf->iov_base + l;
	buf->iov_len -= l;
}

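/*
 * Illustrative usage sketch for iovec_collect(), with made-up variable
 * names: a filter producing output into a scratch buffer `buf` records
 * each chunk it has written into the accumulating output vector `out`,
 * while `buf` shrinks to the space still free.
 */
#if 0
	struct iovec buf, out = { NULL, 0 };
	size_t written;

	/* ... point buf.iov_base/iov_len at the available scratch space ... */
	/* ... produce `written` bytes at the start of buf ... */
	iovec_collect(&buf, &out, written);
	/* out now covers everything produced so far, buf the free rest */
#endif
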
/*
 * return a single lease via the vdc vscaret
 */
static inline
void vdpio_return_lease(const struct vdp_ctx *vdc, uint64_t lease)
{
	struct vscaret *scaret;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	scaret = vdc->scaret;
	VSCARET_CHECK_NOTNULL(scaret);

	if (scaret->used == scaret->capacity)
		ObjVAIreturn(vdc->wrk, vdc->vai_hdl, scaret);
	VSCARET_ADD(scaret, lease);
}

/*
 * add all leases from the vscarab to the vscaret
 */
static inline
void vdpio_return_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab)
{
	struct viov *v;

	VSCARAB_CHECK_NOTNULL(scarab);
	VSCARAB_FOREACH(v, scarab)
		vdpio_return_lease(vdc, v->lease);
	VSCARAB_INIT(scarab, scarab->capacity);
}

/*
 * return used up iovs (len == 0)
 * move remaining to the beginning of the scarab
 */
static inline void
vdpio_consolidate_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab)
{
	struct viov *v, *f = NULL;

	VSCARAB_CHECK_NOTNULL(scarab);
	VSCARAB_FOREACH(v, scarab) {
		if (v->iov.iov_len == 0) {
			AN(v->iov.iov_base);
			vdpio_return_lease(vdc, v->lease);
			if (f == NULL)
				f = v;
			continue;
		}
		else if (f == NULL)
			continue;
		/* move the remaining viovs down to the first free slot;
		 * scale the element count to bytes for memmove() */
		memmove(f, v, (scarab->used - (v - scarab->s)) * sizeof (*v));
		break;
	}
	if (f != NULL)
		scarab->used = f - scarab->s;
}

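/*
 * Illustrative usage sketch, with made-up context: a consumer that handed
 * parts of a vscarab to writev() marks fully written viovs by setting their
 * iov_len to 0 and then consolidates, so the leases go back to vdc->scaret
 * and the remaining viovs move to the front before more data is pulled.
 */
#if 0
	/* after writev() consumed some viovs completely: */
	vdpio_consolidate_vscarab(vdc, scarab);
	/* used-up leases are now parked in vdc->scaret,
	 * the remaining viovs start at scarab->s[0] */
#endif
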
// Lifecycle management in cache_deliver_proc.c
int VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret);
void VDPIO_Return(const struct vdp_ctx *vdc);
void VDPIO_Fini(struct vdp_ctx *vdc);

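/*
 * Illustrative sketch of the consumer side, not the actual implementation
 * (which lives in cache_deliver_proc.c).  It assumes, by analogy with
 * vdc->retval ("error or capacity") and the vdpio_init_f() convention,
 * that VDPIO_Init() returns a capacity of at least 1 on success; the
 * scarab setup, end-of-object test and error handling are only hinted at
 * in comments because their exact contracts are not part of this header.
 */
#if 0
	int done = 0, capacity;

	capacity = VDPIO_Init(vdc, oc, ws, notify_cb, notify_priv, scaret);
	if (capacity < 1)
		return (-1);		/* error or nothing to push */
	/* arrange for a struct vscarab *scarab with at least this capacity */
	while (!done) {
		(void)vdpio_pull(vdc, NULL, scarab);	/* fill the scarab */
		/* write out scarab->s[0..used-1].iov and set `done` once
		 * the end of the object has been reached ... */
		vdpio_return_vscarab(vdc, scarab);	/* hand leases back */
	}
	VDPIO_Return(vdc);
	VDPIO_Fini(vdc);
#endif
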
void v_deprecated_ VRT_AddVDP(VRT_CTX, const struct vdp *);
void v_deprecated_ VRT_RemoveVDP(VRT_CTX, const struct vdp *);

/* Registry functions -------------------------------------------------*/
const char *VRT_AddFilter(VRT_CTX, const struct vfp *, const struct vdp *);
void VRT_RemoveFilter(VRT_CTX, const struct vfp *, const struct vdp *);