| | varnish-cache/bin/varnishd/cache/cache_esi_deliver.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2011 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* VED - Varnish Esi Delivery |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include "cache_varnishd.h" |
35 |
|
|
36 |
|
#include <stdlib.h> |
37 |
|
|
38 |
|
#include "cache_transport.h" |
39 |
|
#include "cache_filter.h" |
40 |
|
#include "cache_vgz.h" |
41 |
|
|
42 |
|
#include "vct.h" |
43 |
|
#include "vtim.h" |
44 |
|
#include "cache_esi.h" |
45 |
|
#include "vend.h" |
46 |
|
#include "vgz.h" |
47 |
|
|
48 |
|
static vtr_deliver_f ved_deliver; |
49 |
|
static vtr_reembark_f ved_reembark; |
50 |
|
|
51 |
|
/*
 * Canonical 10-byte gzip member header (RFC 1952):
 * ID1/ID2 magic (0x1f 0x8b), CM=8 (deflate), FLG=0, MTIME=0 (4 bytes),
 * then XFL=0x02 ("max compression") and OS=0x03 (Unix).
 */
static const uint8_t gzip_hdr[] = {
	0x1f, 0x8b, 0x08,
	0x00, 0x00, 0x00, 0x00,
	0x00,
	0x02, 0x03
};
|
|
58 |
|
/* Per-delivery ESI processing context, carried in req->transport_priv. */
struct ecx {
	unsigned magic;
#define ECX_MAGIC 0x0b0f9163
	const uint8_t *p;		/* cursor into the OA_ESIDATA byte-code */
	const uint8_t *e;		/* end of the byte-code */
	int state;			/* VED state machine state (see ved_vdp_esi_bytes) */
	ssize_t l;			/* bytes remaining in current verbatim/skip chunk */
	int isgzip;			/* object carries gzipped ESI segments */
	int woken;			/* reembark wake-up flag, guarded by sp->mtx */
	int abrt;			/* abort delivery on include error (esi_include_onerror) */

	struct req *preq;		/* the parent (including) request */
	struct ecx *pecx;		/* parent ecx when nested in a gzipped parent */
	ssize_t l_crc;			/* accumulated uncompressed length for gzip trailer */
	uint32_t crc;			/* accumulated CRC32 for gzip trailer */
};
74 |
|
|
75 |
|
static int v_matchproto_(vtr_minimal_response_f) |
76 |
0 |
ved_minimal_response(struct req *req, uint16_t status) |
77 |
|
{ |
78 |
0 |
(void)req; |
79 |
0 |
(void)status; |
80 |
0 |
WRONG("esi:includes should not try minimal responses"); |
81 |
0 |
} |
82 |
|
|
83 |
|
/* Pseudo-transport used for ESI sub-requests. */
static const struct transport VED_transport = {
	.magic =		TRANSPORT_MAGIC,
	.name =			"ESI_INCLUDE",
	.deliver =		ved_deliver,
	.reembark =		ved_reembark,
	.minimal_response =	ved_minimal_response,
};
90 |
|
|
91 |
|
/*--------------------------------------------------------------------*/ |
92 |
|
|
93 |
|
/*
 * Wake the parent request's worker, which is blocked in ved_include()
 * waiting for the ESI sub-request to come off a waiting list.
 * The woken flag and the condvar signal are both done under sp->mtx,
 * matching the Lck_CondWait() in ved_include().
 */
static void v_matchproto_(vtr_reembark_f)
ved_reembark(struct worker *wrk, struct req *req)
{
	struct ecx *ecx;

	(void)wrk;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);
	Lck_Lock(&req->sp->mtx);
	ecx->woken = 1;
	PTOK(pthread_cond_signal(&ecx->preq->wrk->cond));
	Lck_Unlock(&req->sp->mtx);
}
106 |
|
|
107 |
|
/*--------------------------------------------------------------------*/ |
108 |
|
|
109 |
|
/*
 * Execute one <esi:include>: build a synthetic GET sub-request from the
 * parent request, run it through the request FSM on the parent's worker,
 * and tear it down again.  Output flows through the parent's VDP chain
 * because the sub-request uses VED_transport.
 *
 * src is the include URL, host an optional replacement Host: header
 * (both NUL-terminated strings out of the ESI byte-code).
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct sess *sp;
	struct req *req;
	enum req_fsm_nxt s;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC);
	sp = preq->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	/* Refuse to recurse deeper than max_esi_depth; optionally abort
	 * the whole top-level delivery if esi_include_onerror says so. */
	if (preq->esi_level >= cache_param->max_esi_depth) {
		VSLb(preq->vsl, SLT_VCL_Error,
		    "ESI depth limit reached (param max_esi_depth = %u)",
		    cache_param->max_esi_depth);
		if (ecx->abrt)
			preq->top->topreq->vdc->retval = -1;
		return;
	}

	req = Req_New(sp);
	AN(req);
	THR_SetRequest(req);
	assert(IS_NO_VXID(req->vsl->wid));
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	wrk->stats->esi_req++;
	req->esi_level = preq->esi_level + 1;

	/* Cross-link parent and child in the log. */
	VSLb(req->vsl, SLT_Begin, "req %ju esi %u",
	    (uintmax_t)VXID(preq->vsl->wid), req->esi_level);
	VSLb(preq->vsl, SLT_Link, "req %ju esi %u",
	    (uintmax_t)VXID(req->vsl->wid), req->esi_level);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	/* The sub-request shares the parent's reqtop, not its own. */
	memset(req->top, 0, sizeof *req->top);
	req->top = preq->top;

	HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
	HTTP_Dup(req->http, preq->http0);

	http_SetH(req->http, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0') {
		http_Unset(req->http, H_Host);
		http_SetHeader(req->http, host);
	}

	http_ForceField(req->http, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http, H_If_Modified_Since);
	http_Unset(req->http, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http, H_Range);

	/* Set Accept-Encoding according to what we want */
	if (ecx->isgzip)
		http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
	else
		http_Unset(req->http, H_Accept_Encoding);

	/* Client content already taken care of */
	http_Unset(req->http, H_Content_Length);
	http_Unset(req->http, H_Transfer_Encoding);
	req->req_body_status = BS_NONE;

	/* Run under the top request's original VCL if one was pinned,
	 * otherwise under the parent's current VCL. */
	AZ(req->vcl);
	AN(req->top);
	if (req->top->vcl0)
		req->vcl = req->top->vcl0;
	else
		req->vcl = preq->vcl;
	VCL_Ref(req->vcl);

	assert(req->req_step == R_STP_TRANSPORT);
	req->t_req = preq->t_req;

	req->transport = &VED_transport;
	req->transport_priv = ecx;

	VCL_TaskEnter(req->privs);

	/* Drive the request FSM to completion on this worker.  If it
	 * disembarks (waiting list), block on the parent's condvar until
	 * ved_reembark() wakes us, then re-embark and continue. */
	while (1) {
		CNT_Embark(wrk, req);
		ecx->woken = 0;
		s = CNT_Request(req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		Lck_Lock(&sp->mtx);
		if (!ecx->woken)
			(void)Lck_CondWait(&ecx->preq->wrk->cond, &sp->mtx);
		Lck_Unlock(&sp->mtx);
		AZ(req->wrk);
	}

	VCL_Rel(&req->vcl);

	req->wrk = NULL;
	THR_SetRequest(preq);

	Req_Cleanup(sp, wrk, req);
	Req_Release(req);
}
223 |
|
|
224 |
|
/*--------------------------------------------------------------------*/ |
225 |
|
|
226 |
|
/* Compile-time debug tracing: switch to the printf variant to enable. */
//#define Debug(fmt, ...) printf(fmt, __VA_ARGS__)
#define Debug(fmt, ...) /**/
228 |
|
|
229 |
|
static ssize_t |
230 |
60718 |
ved_decode_len(struct vsl_log *vsl, const uint8_t **pp) |
231 |
|
{ |
232 |
|
const uint8_t *p; |
233 |
|
ssize_t l; |
234 |
|
|
235 |
60718 |
p = *pp; |
236 |
60718 |
switch (*p & 15) { |
237 |
|
case 1: |
238 |
52998 |
l = p[1]; |
239 |
52998 |
p += 2; |
240 |
52998 |
break; |
241 |
|
case 2: |
242 |
7640 |
l = vbe16dec(p + 1); |
243 |
7640 |
p += 3; |
244 |
7640 |
break; |
245 |
|
case 8: |
246 |
80 |
l = vbe64dec(p + 1); |
247 |
80 |
p += 9; |
248 |
80 |
break; |
249 |
|
default: |
250 |
0 |
VSLb(vsl, SLT_Error, |
251 |
0 |
"ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15)); |
252 |
0 |
WRONG("ESI-codes: illegal length"); |
253 |
0 |
} |
254 |
60718 |
*pp = p; |
255 |
60718 |
assert(l > 0); |
256 |
60718 |
return (l); |
257 |
|
} |
258 |
|
|
259 |
|
/*--------------------------------------------------------------------- |
260 |
|
*/ |
261 |
|
|
262 |
|
/*
 * Install the ESI VDP on a delivery: only possible client-side and when
 * the object carries an OA_ESIDATA attribute.  Returns non-zero to
 * decline installation.
 */
static int v_matchproto_(vdp_init_f)
ved_vdp_esi_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
	struct ecx *ecx;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	AN(priv);

	AZ(*priv);
	if (vdc->oc == NULL || !ObjHasAttr(vdc->wrk, vdc->oc, OA_ESIDATA))
		return (1);

	if (ctx->req == NULL) {
		VSLb(vdc->vsl, SLT_Error,
		     "esi can only be used on the client side");
		return (1);
	}

	ALLOC_OBJ(ecx, ECX_MAGIC);
	AN(ecx);
	/* ved_vdp_esi_bytes() emits gzip_hdr with a hard-coded length 10. */
	assert(sizeof gzip_hdr == 10);
	ecx->preq = ctx->req;
	*priv = ecx;
	/* ESI assembly changes the body, so any ETag must become weak. */
	RFC2616_Weaken_Etag(vdc->hp);

	ctx->req->res_mode |= RES_ESI;
	/* Assembled length is unknown up front (unless body is empty). */
	if (*vdc->clen != 0)
		*vdc->clen = -1;
	/* For nested includes, link to the parent's ecx -- but only when
	 * the parent is gzipping, since the link is used for CRC folding. */
	if (ctx->req->esi_level > 0) {
		assert(ctx->req->transport == &VED_transport);
		CAST_OBJ_NOTNULL(ecx->pecx, ctx->req->transport_priv, ECX_MAGIC);
		if (!ecx->pecx->isgzip)
			ecx->pecx = NULL;
	}

	return (0);
}
304 |
|
|
305 |
|
static int v_matchproto_(vdp_fini_f) |
306 |
8320 |
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv) |
307 |
|
{ |
308 |
|
struct ecx *ecx; |
309 |
|
|
310 |
8320 |
(void)vdc; |
311 |
8320 |
TAKE_OBJ_NOTNULL(ecx, priv, ECX_MAGIC); |
312 |
8320 |
FREE_OBJ(ecx); |
313 |
8320 |
return (0); |
314 |
|
} |
315 |
|
|
316 |
|
/*
 * The ESI assembly state machine.  Interprets the OA_ESIDATA byte-code
 * against the object body bytes passed in (ptr/len) and pushes the
 * assembled output down the VDP chain.
 *
 * States:
 *   0  - first call: fetch byte-code, emit gzip header if needed
 *   1  - dispatch on the next byte-code instruction
 *   2  - byte-code exhausted: emit gzip trailer / fold CRC into parent
 *   3  - copy ecx->l verbatim body bytes downstream
 *   4  - skip ecx->l body bytes
 *   99 - done; swallow any trailing PAD/CRC/LEN bytes
 */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	const uint8_t *q, *r;
	ssize_t l = 0;
	uint32_t icrc = 0;
	uint8_t tailbuf[8 + 5];
	const uint8_t *pp;
	struct ecx *ecx;
	int retval = 0;

	/* State 2 issues the one-and-only VDP_END; demote here. */
	if (act == VDP_END)
		act = VDP_FLUSH;

	AN(priv);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	pp = ptr;

	while (1) {
		switch (ecx->state) {
		case 0:
			ecx->p = ObjGetAttr(vdc->wrk, ecx->preq->objcore,
			    OA_ESIDATA, &l);
			AN(ecx->p);
			assert(l > 0);
			ecx->e = ecx->p + l;

			if (*ecx->p == VEC_GZ) {
				/* Top-level gzip: we own the gzip framing. */
				if (ecx->pecx == NULL)
					retval = VDP_bytes(vdc, VDP_NULL,
					    gzip_hdr, 10);
				ecx->l_crc = 0;
				ecx->crc = crc32(0L, Z_NULL, 0);
				ecx->isgzip = 1;
				ecx->p++;
			}
			ecx->state = 1;
			break;
		case 1:
			if (ecx->p >= ecx->e) {
				ecx->state = 2;
				break;
			}
			switch (*ecx->p) {
			case VEC_V1:
			case VEC_V2:
			case VEC_V8:
				/* Verbatim chunk: length, and for gzip also
				 * a compressed length + CRC32 to fold in. */
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				if (ecx->isgzip) {
					assert(*ecx->p == VEC_C1 ||
					    *ecx->p == VEC_C2 ||
					    *ecx->p == VEC_C8);
					l = ved_decode_len(vdc->vsl, &ecx->p);
					if (l < 0)
						return (-1);
					icrc = vbe32dec(ecx->p);
					ecx->p += 4;
					ecx->crc = crc32_combine(
					    ecx->crc, icrc, l);
					ecx->l_crc += l;
				}
				ecx->state = 3;
				break;
			case VEC_S1:
			case VEC_S2:
			case VEC_S8:
				/* Skip chunk: body bytes not part of output. */
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				Debug("SKIP1(%d)\n", (int)ecx->l);
				ecx->state = 4;
				break;
			case VEC_IA:
				/* "abort on error" include variant. */
				ecx->abrt =
				    FEATURE(FEATURE_ESI_INCLUDE_ONERROR);
				/* FALLTHROUGH */
			case VEC_IC:
				/* Include: two NUL-terminated strings follow,
				 * host then URL. */
				ecx->p++;
				q = (void*)strchr((const char*)ecx->p, '\0');
				AN(q);
				q++;
				r = (void*)strchr((const char*)q, '\0');
				AN(r);
				/* Flush pending output before the include so
				 * ordering downstream is preserved. */
				if (VDP_bytes(vdc, VDP_FLUSH, NULL, 0)) {
					ecx->p = ecx->e;
					break;
				}
				Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
				ved_include(ecx->preq,
				    (const char*)q, (const char*)ecx->p, ecx);
				Debug("INCL [%s][%s] END\n", q, ecx->p);
				ecx->p = r + 1;
				break;
			default:
				VSLb(vdc->vsl, SLT_Error,
				    "ESI corruption line %d 0x%02x [%s]\n",
				    __LINE__, *ecx->p, ecx->p);
				WRONG("ESI-codes: Illegal code");
			}
			break;
		case 2:
			ptr = NULL;
			len = 0;
			if (ecx->isgzip && ecx->pecx == NULL) {
				/*
				 * We are bytealigned here, so simply emit
				 * a gzip literal block with finish bit set.
				 */
				tailbuf[0] = 0x01;
				tailbuf[1] = 0x00;
				tailbuf[2] = 0x00;
				tailbuf[3] = 0xff;
				tailbuf[4] = 0xff;

				/* Emit CRC32 */
				vle32enc(tailbuf + 5, ecx->crc);

				/* MOD(2^32) length */
				vle32enc(tailbuf + 9, ecx->l_crc);

				ptr = tailbuf;
				len = 13;
			} else if (ecx->pecx != NULL) {
				/* Nested gzip include: fold our CRC/length
				 * into the parent instead of a trailer. */
				ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
				    ecx->crc, ecx->l_crc);
				ecx->pecx->l_crc += ecx->l_crc;
			}
			retval = VDP_bytes(vdc, VDP_END, ptr, len);
			ecx->state = 99;
			return (retval);
		case 3:
		case 4:
			/*
			 * There is no guarantee that the 'l' bytes are all
			 * in the same storage segment, so loop over storage
			 * until we have processed them all.
			 */
			if (ecx->l <= len) {
				if (ecx->state == 3)
					retval = VDP_bytes(vdc, act,
					    pp, ecx->l);
				len -= ecx->l;
				pp += ecx->l;
				ecx->state = 1;
				break;
			}
			if (ecx->state == 3 && len > 0)
				retval = VDP_bytes(vdc, act, pp, len);
			ecx->l -= len;
			return (retval);
		case 99:
			/*
			 * VEP does not account for the PAD+CRC+LEN
			 * so we can see up to approx 15 bytes here.
			 */
			return (retval);
		default:
			WRONG("FOO");
			break;
		}
		if (retval)
			return (retval);
	}
}
484 |
|
|
485 |
|
/* The ESI assembly delivery processor. */
const struct vdp VDP_esi = {
	.name =		"esi",
	.init =		ved_vdp_esi_init,
	.bytes =	ved_vdp_esi_bytes,
	.fini =		ved_vdp_esi_fini,
};
491 |
|
|
492 |
|
/* |
493 |
|
* Account body bytes on req |
494 |
|
* Push bytes to preq |
495 |
|
*/ |
496 |
|
static inline int |
497 |
88280 |
ved_bytes(struct ecx *ecx, enum vdp_action act, |
498 |
|
const void *ptr, ssize_t len) |
499 |
|
{ |
500 |
88280 |
if (act == VDP_END) |
501 |
14800 |
act = VDP_FLUSH; |
502 |
88280 |
return (VDP_bytes(ecx->preq->vdc, act, ptr, len)); |
503 |
|
} |
504 |
|
|
505 |
|
/*--------------------------------------------------------------------- |
506 |
|
* If a gzipped ESI object includes a ungzipped object, we need to make |
507 |
|
* it looked like a gzipped data stream. The official way to do so would |
508 |
|
* be to fire up libvgz and gzip it, but we don't, we fake it. |
509 |
|
* |
510 |
|
* First, we cannot know if it is ungzipped on purpose, the admin may |
511 |
|
* know something we don't. |
512 |
|
* |
513 |
|
* What do you mean "BS ?" |
514 |
|
* |
515 |
|
* All right then... |
516 |
|
* |
517 |
|
* The matter of the fact is that we simply will not fire up a gzip in |
518 |
|
* the output path because it costs too much memory and CPU, so we simply |
519 |
|
* wrap the data in very convenient "gzip copy-blocks" and send it down |
520 |
|
* the stream with a bit more overhead. |
521 |
|
*/ |
522 |
|
|
523 |
|
static int v_matchproto_(vdp_fini_f) |
524 |
720 |
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv) |
525 |
|
{ |
526 |
720 |
(void)vdc; |
527 |
720 |
*priv = NULL; |
528 |
720 |
return (0); |
529 |
|
} |
530 |
|
|
531 |
|
/*
 * Wrap uncompressed include bytes in deflate "stored" (uncompressed)
 * blocks so they can be embedded in the gzipped parent stream, and
 * fold their CRC32/length into the parent's running gzip trailer state.
 *
 * A stored block header is 5 bytes: BFINAL/BTYPE byte (0), then LEN and
 * ~LEN as little-endian 16-bit values (RFC 1951), capping each block at
 * 65535 payload bytes.
 */
static int v_matchproto_(vdp_bytes_f)
ved_pretend_gzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *pv, ssize_t l)
{
	uint8_t buf1[5], buf2[5];
	const uint8_t *p;
	uint16_t lx;
	struct ecx *ecx;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);

	(void)priv;
	/* Zero-length push: just forward the action (flush semantics). */
	if (l == 0)
		return (ved_bytes(ecx, act, pv, l));

	p = pv;

	AN (ecx->isgzip);
	/* Account these plaintext bytes in the gzip trailer state. */
	ecx->crc = crc32(ecx->crc, p, l);
	ecx->l_crc += l;

	/*
	 * buf1 can safely be emitted multiple times for objects longer
	 * than 64K-1 bytes.
	 */
	lx = 65535;
	buf1[0] = 0;
	vle16enc(buf1 + 1, lx);
	vle16enc(buf1 + 3, ~lx);

	while (l > 0) {
		if (l >= 65535) {
			/* Full-size stored block, pre-built header. */
			lx = 65535;
			if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1))
				return (-1);
		} else {
			/* Final partial block: build a matching header. */
			lx = (uint16_t)l;
			buf2[0] = 0;
			vle16enc(buf2 + 1, lx);
			vle16enc(buf2 + 3, ~lx);
			if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2))
				return (-1);
		}
		if (ved_bytes(ecx, VDP_NULL, p, lx))
			return (-1);
		l -= lx;
		p += lx;
	}
	/* buf1 & buf2 are local, so we have to flush */
	return (ved_bytes(ecx, VDP_FLUSH, NULL, 0));
}
583 |
|
|
584 |
|
/* VDP for plain includes embedded in a gzipped parent ("pretend gzip"). */
static const struct vdp ved_pretend_gz = {
	.name =		"PGZ",
	.bytes =	ved_pretend_gzip_bytes,
	.fini =		ved_pretend_gzip_fini,
};
589 |
|
|
590 |
|
/*--------------------------------------------------------------------- |
591 |
|
* Include a gzipped object in a gzipped ESI object delivery |
592 |
|
* |
593 |
|
* This is the interesting case: Deliver all the deflate blocks, stripping |
594 |
|
* the "LAST" bit of the last one and padding it, as necessary, to a byte |
595 |
|
* boundary. |
596 |
|
* |
597 |
|
*/ |
598 |
|
|
599 |
|
/* State for re-framing a gzipped include into the parent gzip stream. */
struct ved_foo {
	unsigned		magic;
#define VED_FOO_MAGIC		0x6a5a262d
	struct ecx		*ecx;		/* parent delivery context */
	struct objcore		*objcore;	/* the gzipped include object */
	uint64_t		start, last, stop, lpad;	/* OA_GZIPBITS bit offsets; lpad = pad bytes queued */
	ssize_t			ll;		/* bytes of the object consumed so far */
	uint64_t		olen;		/* total object length */
	uint8_t			dbits[8];	/* scratch for rewritten/padding bits */
	uint8_t			tailbuf[8];	/* collected gzip trailer (CRC32 + ISIZE) */
};
610 |
|
|
611 |
|
/*
 * Prepare gzip-in-gzip delivery: read the OA_GZIPBITS attribute, which
 * records (as bit offsets into the object) where the deflate payload
 * starts, where the last block begins, and where it stops.
 */
static int v_matchproto_(vdp_init_f)
ved_gzgz_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
	ssize_t l;
	const char *p;
	struct ved_foo *foo;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	AN(priv);

	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC);

	/* Poison the trailer buffer so partial fills are detectable. */
	memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf);

	AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED));

	/* OA_GZIPBITS is three big-endian 64-bit bit offsets (32 bytes,
	 * last 8 unused here). */
	p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	foo->start = vbe64dec(p);
	foo->last = vbe64dec(p + 8);
	foo->stop = vbe64dec(p + 16);
	foo->olen = ObjGetLen(vdc->wrk, foo->objcore);
	assert(foo->start > 0 && foo->start < foo->olen * 8);
	assert(foo->last > 0 && foo->last < foo->olen * 8);
	assert(foo->stop > 0 && foo->stop < foo->olen * 8);
	assert(foo->last >= foo->start);
	assert(foo->last < foo->stop);

	/* The start bit must be byte aligned. */
	AZ(foo->start & 7);
	return (0);
}
646 |
|
|
647 |
|
/* |
648 |
|
* XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than |
649 |
|
* we need. The VDP_END case would trip our "at most one VDP_END call" assertion |
650 |
|
* in VDP_bytes(), but ved_bytes() covers it. |
651 |
|
* |
652 |
|
* To avoid unnecessary chunks downstream, it would be nice to re-structure the |
653 |
|
* code to identify the last block, send VDP_END/VDP_FLUSH for that one and |
654 |
|
* VDP_NULL for anything before it. |
655 |
|
*/ |
656 |
|
|
657 |
|
/*
 * Stream a gzipped include's deflate blocks into the parent stream:
 * skip the gzip header, pass the body, clear the BFINAL bit of the last
 * block, pad the bit stream to a byte boundary with empty deflate
 * constructs, and capture (not forward) the 8-byte gzip trailer for
 * CRC folding in ved_gzgz_fini().
 *
 * foo->ll tracks how many object bytes have been consumed across calls,
 * so each stage below triggers exactly once at its bit offset.
 */
static int v_matchproto_(vdp_bytes_f)
ved_gzgz_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ved_foo *foo;
	const uint8_t *pp;
	ssize_t dl;
	ssize_t l;

	(void)vdc;
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	pp = ptr;
	if (len > 0) {
		/* Skip over the GZIP header */
		dl = foo->start / 8 - foo->ll;
		if (dl > 0) {
			/* Before foo.start, skip */
			if (dl > len)
				dl = len;
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0) {
		/* The main body of the object */
		dl = foo->last / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && foo->ll == foo->last / 8) {
		/* Remove the "LAST" bit */
		foo->dbits[0] = *pp;
		foo->dbits[0] &= ~(1U << (foo->last & 7));
		if (ved_bytes(foo->ecx, act, foo->dbits, 1))
			return (-1);
		foo->ll++;
		len--;
		pp++;
	}
	if (len > 0) {
		/* Last block */
		dl = foo->stop / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) {
		/* Add alignment to byte boundary */
		foo->dbits[1] = *pp;
		foo->ll++;
		len--;
		pp++;
		/* Pad the remaining bits of the byte with empty deflate
		 * blocks (stored and/or fixed-huffman), per bit position. */
		switch ((int)(foo->stop & 7)) {
		case 1: /*
			 * x000....
			 * 00000000 00000000 11111111 11111111
			 */
		case 3: /*
			 * xxx000..
			 * 00000000 00000000 11111111 11111111
			 */
		case 5: /*
			 * xxxxx000
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00; foo->dbits[3] = 0x00;
			foo->dbits[4] = 0xff; foo->dbits[5] = 0xff;
			foo->lpad = 5;
			break;
		case 2: /* xx010000 00000100 00000001 00000000 */
			foo->dbits[1] |= 0x08;
			foo->dbits[2] = 0x20;
			foo->dbits[3] = 0x80;
			foo->dbits[4] = 0x00;
			foo->lpad = 4;
			break;
		case 4: /* xxxx0100 00000001 00000000 */
			foo->dbits[1] |= 0x20;
			foo->dbits[2] = 0x80;
			foo->dbits[3] = 0x00;
			foo->lpad = 3;
			break;
		case 6: /* xxxxxx01 00000000 */
			foo->dbits[1] |= 0x80;
			foo->dbits[2] = 0x00;
			foo->lpad = 2;
			break;
		case 7: /*
			 * xxxxxxx0
			 * 00......
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00;
			foo->dbits[3] = 0x00; foo->dbits[4] = 0x00;
			foo->dbits[5] = 0xff; foo->dbits[6] = 0xff;
			foo->lpad = 6;
			break;
		case 0: /* xxxxxxxx */
		default:
			/* stop & 7 != 0 was checked above. */
			WRONG("compiler must be broken");
		}
		if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad))
			return (-1);
	}
	if (len > 0) {
		/* Recover GZIP tail */
		dl = foo->olen - foo->ll;
		assert(dl >= 0);
		if (dl > len)
			dl = len;
		if (dl > 0) {
			/* The trailer may arrive split across calls;
			 * accumulate into tailbuf at the right offset. */
			assert(dl <= 8);
			l = foo->ll - (foo->olen - 8);
			assert(l >= 0);
			assert(l <= 8);
			assert(l + dl <= 8);
			memcpy(foo->tailbuf + l, pp, dl);
			foo->ll += dl;
			len -= dl;
		}
	}
	assert(len == 0);
	return (0);
}
793 |
|
|
794 |
|
/*
 * Flush remaining output and fold the captured gzip trailer (CRC32 and
 * ISIZE, both little-endian per RFC 1952) into the parent's trailer state.
 */
static int v_matchproto_(vdp_fini_f)
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv)
{
	uint32_t icrc;
	uint32_t ilen;
	struct ved_foo *foo;

	(void)vdc;
	TAKE_OBJ_NOTNULL(foo, priv, VED_FOO_MAGIC);

	/* XXX
	 * this works due to the esi layering, a VDP pushing bytes from _fini
	 * will otherwise have its own _bytes method called.
	 *
	 * Could rewrite use VDP_END
	 */
	(void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0);

	icrc = vle32dec(foo->tailbuf);
	ilen = vle32dec(foo->tailbuf + 4);
	foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen);
	foo->ecx->l_crc += ilen;

	return (0);
}
819 |
|
|
820 |
|
/* VDP for gzipped includes embedded in a gzipped parent (gzip-in-gzip). */
static const struct vdp ved_gzgz = {
	.name =		"VZZ",
	.init =		ved_gzgz_init,
	.bytes =	ved_gzgz_bytes,
	.fini =		ved_gzgz_fini,
};
826 |
|
|
827 |
|
/*-------------------------------------------------------------------- |
828 |
|
* Straight through without processing. |
829 |
|
*/ |
830 |
|
|
831 |
|
static int v_matchproto_(vdp_fini_f) |
832 |
11520 |
ved_vdp_fini(struct vdp_ctx *vdc, void **priv) |
833 |
|
{ |
834 |
11520 |
(void)vdc; |
835 |
11520 |
*priv = NULL; |
836 |
11520 |
return (0); |
837 |
|
} |
838 |
|
|
839 |
|
static int v_matchproto_(vdp_bytes_f) |
840 |
76680 |
ved_vdp_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
841 |
|
const void *ptr, ssize_t len) |
842 |
|
{ |
843 |
|
struct ecx *ecx; |
844 |
|
|
845 |
76680 |
(void)vdc; |
846 |
76680 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
847 |
76680 |
return (ved_bytes(ecx, act, ptr, len)); |
848 |
|
} |
849 |
|
|
850 |
|
/* VDP for includes delivered straight through without processing. */
static const struct vdp ved_ved = {
	.name =		"VED",
	.bytes =	ved_vdp_bytes,
	.fini =		ved_vdp_fini,
};
855 |
|
|
856 |
|
/*
 * Close the include's VDP chain, accounting body bytes on the request.
 * On error, propagate failure and connection-close to the topmost request
 * so the whole ESI delivery is aborted.
 */
static void
ved_close(struct req *req, int error)
{
	req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);

	if (! error)
		return;
	req->top->topreq->vdc->retval = -1;
	req->top->topreq->doclose = req->doclose;
}
866 |
|
|
867 |
|
/*--------------------------------------------------------------------*/ |
868 |
|
|
869 |
|
/*
 * Deliver an ESI include through the parent's VDP chain, picking one of
 * three paths: gzip-in-gzip re-framing (ved_gzgz), stored-block "pretend
 * gzip" wrapping (ved_pretend_gz), or straight pass-through (ved_ved).
 */
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
ved_deliver(struct req *req, int wantbody)
{
	int i = 0;
	const char *p;
	uint16_t status;
	struct ecx *ecx;
	struct ved_foo foo[1];
	struct vrt_ctx ctx[1];

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	status = req->resp->status % 1000;

	/* With esi_include_onerror, only 200/204 fragments are delivered;
	 * anything else is dropped (and may abort if the include said so). */
	if (FEATURE(FEATURE_ESI_INCLUDE_ONERROR) &&
	    status != 200 && status != 204) {
		ved_close(req, ecx->abrt);
		return (VTR_D_DONE);
	}

	if (wantbody == 0) {
		ved_close(req, 0);
		return (VTR_D_DONE);
	}

	if (req->boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0) {
		ved_close(req, 0);
		return (VTR_D_DONE);
	}

	/* i: object both claims and is flagged as gzip. */
	if (http_GetHdr(req->resp, H_Content_Encoding, &p))
		i = http_coding_eq(p, gzip);
	if (i)
		i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Req2Ctx(ctx, req);

	if (ecx->isgzip && i && !(req->res_mode & RES_ESI)) {
		/* A gzipped include which is not ESI processed */

		/* OA_GZIPBITS are not valid until BOS_FINISHED */
		if (req->boc != NULL)
			ObjWaitState(req->objcore, BOS_FINISHED);

		if (req->objcore->flags & OC_F_FAILED) {
			/* No way of signalling errors in the middle of
			 * the ESI body. Omit this ESI fragment.
			 * XXX change error argument to 1
			 */
			ved_close(req, 0);
			return (VTR_D_DONE);
		}

		INIT_OBJ(foo, VED_FOO_MAGIC);
		foo->ecx = ecx;
		foo->objcore = req->objcore;
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_gzgz, foo);
	} else if (ecx->isgzip && !i) {
		/* Non-Gzip'ed include in gzipped parent */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_pretend_gz, ecx);
	} else {
		/* Anything else goes straight through */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_ved, ecx);
	}

	if (i == 0) {
		i = VDP_DeliverObj(req->vdc, req->objcore);
	} else {
		VSLb(req->vsl, SLT_Error, "Failure to push ESI processors");
		req->doclose = SC_OVERLOAD;
	}

	if (i && req->doclose == SC_NULL)
		req->doclose = SC_REM_CLOSE;

	ved_close(req, i && ecx->abrt ? 1 : 0);
	return (VTR_D_DONE);
}