| | varnish-cache/bin/varnishd/cache/cache_esi_deliver.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2011 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* VED - Varnish Esi Delivery |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include "cache_varnishd.h" |
35 |
|
|
36 |
|
#include <stdlib.h> |
37 |
|
|
38 |
|
#include "cache_transport.h" |
39 |
|
#include "cache_filter.h" |
40 |
|
#include "cache_vgz.h" |
41 |
|
|
42 |
|
#include "vct.h" |
43 |
|
#include "vtim.h" |
44 |
|
#include "cache_esi.h" |
45 |
|
#include "vend.h" |
46 |
|
#include "vgz.h" |
47 |
|
|
48 |
|
static vtr_deliver_f ved_deliver; |
49 |
|
static vtr_reembark_f ved_reembark; |
50 |
|
|
51 |
|
/*
 * Canonical 10-byte gzip file header (RFC 1952), emitted verbatim in
 * front of ESI-assembled gzip output: magic (1f 8b), CM=8 (deflate),
 * FLG=0, MTIME=0, XFL=2 (max compression), OS=3 (Unix).
 */
static const uint8_t gzip_hdr[] = {
	0x1f, 0x8b, 0x08,
	0x00, 0x00, 0x00, 0x00,
	0x00,
	0x02, 0x03
};
57 |
|
|
58 |
|
/*
 * Per-delivery ESI execution context, allocated by ved_vdp_esi_init()
 * and handed to include subrequests via req->transport_priv.
 */
struct ecx {
	unsigned	magic;
#define ECX_MAGIC	0x0b0f9163
	const uint8_t	*p;		/* cursor into the VEC instructions */
	const uint8_t	*e;		/* end of the VEC instructions */
	int		state;		/* ved_vdp_esi_bytes() state machine */
	ssize_t		l;		/* bytes left in current verbatim/skip */
	int		isgzip;		/* object body is gzip'ed */
	int		woken;		/* set by ved_reembark() to wake parent */
	int		incl_cont;	/* continue on include error (feature) */

	struct req	*preq;		/* parent (including) request */
	struct ecx	*pecx;		/* gzip'ed parent's context, or NULL */
	ssize_t		l_crc;		/* uncompressed bytes folded into crc */
	uint32_t	crc;		/* running CRC32 of uncompressed body */
};
74 |
|
|
75 |
|
/*
 * ESI include subrequests must never generate minimal (synthetic)
 * responses on their own; reaching this handler is a programming error.
 */
static int v_matchproto_(vtr_minimal_response_f)
ved_minimal_response(struct req *req, uint16_t status)
{
	(void)req;
	(void)status;
	WRONG("esi:includes should not try minimal responses");
}
82 |
|
|
83 |
|
/* Transport used for ESI include subrequests */
static const struct transport VED_transport = {
	.magic =		TRANSPORT_MAGIC,
	.name =			"ESI_INCLUDE",
	.deliver =		ved_deliver,
	.reembark =		ved_reembark,
	.minimal_response =	ved_minimal_response,
};
90 |
|
|
91 |
|
/*--------------------------------------------------------------------*/ |
92 |
|
|
93 |
|
/*
 * Transport reembark: called when a disembarked ESI subrequest becomes
 * runnable again.  The parent's worker is parked in ved_include()'s
 * wait loop; flag it and signal its condvar under the session lock.
 */
static void v_matchproto_(vtr_reembark_f)
ved_reembark(struct worker *wrk, struct req *req)
{
	struct ecx *ecx;

	(void)wrk;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);
	Lck_Lock(&req->sp->mtx);
	ecx->woken = 1;
	PTOK(pthread_cond_signal(&ecx->preq->wrk->cond));
	Lck_Unlock(&req->sp->mtx);
}
106 |
|
|
107 |
|
/*--------------------------------------------------------------------*/ |
108 |
|
|
109 |
|
/*
 * Execute one ESI include: synthesize a GET subrequest for 'src'
 * (optionally replacing the Host header with 'host'), run it through
 * the request state machine on this worker, and let VED_transport
 * deliver its body into the parent's pipeline.
 *
 * Enforces cache_param->max_esi_depth: when the limit is hit, the
 * whole ESI response is failed unless esi_include_onerror continuation
 * was requested for this include.
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct sess *sp;
	struct req *req;
	enum req_fsm_nxt s;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC);
	sp = preq->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth) {
		VSLb(preq->vsl, SLT_VCL_Error,
		    "ESI depth limit reached (param max_esi_depth = %u)",
		    cache_param->max_esi_depth);
		if (!ecx->incl_cont)
			preq->top->topreq->vdc->retval = -1;
		return;
	}

	req = Req_New(sp);
	AN(req);
	THR_SetRequest(req);
	assert(IS_NO_VXID(req->vsl->wid));
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	wrk->stats->esi_req++;
	req->esi_level = preq->esi_level + 1;

	/* Cross-link the parent and child in the log */
	VSLb(req->vsl, SLT_Begin, "req %ju esi %u",
	    (uintmax_t)VXID(preq->vsl->wid), req->esi_level);
	VSLb(preq->vsl, SLT_Link, "req %ju esi %u",
	    (uintmax_t)VXID(req->vsl->wid), req->esi_level);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	/* Drop the subrequest's own top and share the parent's */
	memset(req->top, 0, sizeof *req->top);
	req->top = preq->top;

	HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
	HTTP_Dup(req->http, preq->http0);

	http_SetH(req->http, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0') {
		http_Unset(req->http, H_Host);
		http_SetHeader(req->http, host);
	}

	http_ForceField(req->http, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http, H_If_Modified_Since);
	http_Unset(req->http, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http, H_Range);

	/* Set Accept-Encoding according to what we want */
	if (ecx->isgzip)
		http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
	else
		http_Unset(req->http, H_Accept_Encoding);

	/* Client content already taken care of */
	http_Unset(req->http, H_Content_Length);
	http_Unset(req->http, H_Transfer_Encoding);
	req->req_body_status = BS_NONE;

	AZ(req->vcl);
	AN(req->top);
	if (req->top->vcl0)
		req->vcl = req->top->vcl0;
	else
		req->vcl = preq->vcl;
	VCL_Ref(req->vcl);

	assert(req->req_step == R_STP_TRANSPORT);
	req->t_req = preq->t_req;

	req->transport = &VED_transport;
	req->transport_priv = ecx;

	VCL_TaskEnter(req->privs);

	/*
	 * Run the subrequest.  If it disembarks (waiting list), park
	 * this worker until ved_reembark() signals us, then retry.
	 */
	while (1) {
		CNT_Embark(wrk, req);
		ecx->woken = 0;
		s = CNT_Request(req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		Lck_Lock(&sp->mtx);
		if (!ecx->woken)
			(void)Lck_CondWait(&ecx->preq->wrk->cond, &sp->mtx);
		Lck_Unlock(&sp->mtx);
		AZ(req->wrk);
	}

	VCL_Rel(&req->vcl);

	req->wrk = NULL;
	THR_SetRequest(preq);

	Req_Cleanup(sp, wrk, req);
	Req_Release(req);
}
223 |
|
|
224 |
|
/*--------------------------------------------------------------------*/ |
225 |
|
|
226 |
|
//#define Debug(fmt, ...) printf(fmt, __VA_ARGS__)
/* Debug tracing is compiled out; swap in the printf variant above to enable. */
#define Debug(fmt, ...) /**/
228 |
|
|
229 |
|
static ssize_t |
230 |
37725 |
ved_decode_len(struct vsl_log *vsl, const uint8_t **pp) |
231 |
|
{ |
232 |
|
const uint8_t *p; |
233 |
|
ssize_t l; |
234 |
|
|
235 |
37725 |
p = *pp; |
236 |
37725 |
switch (*p & 15) { |
237 |
|
case 1: |
238 |
32900 |
l = p[1]; |
239 |
32900 |
p += 2; |
240 |
32900 |
break; |
241 |
|
case 2: |
242 |
4775 |
l = vbe16dec(p + 1); |
243 |
4775 |
p += 3; |
244 |
4775 |
break; |
245 |
|
case 8: |
246 |
50 |
l = vbe64dec(p + 1); |
247 |
50 |
p += 9; |
248 |
50 |
break; |
249 |
|
default: |
250 |
0 |
VSLb(vsl, SLT_Error, |
251 |
0 |
"ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15)); |
252 |
0 |
WRONG("ESI-codes: illegal length"); |
253 |
0 |
} |
254 |
37725 |
*pp = p; |
255 |
37725 |
assert(l > 0); |
256 |
37725 |
return (l); |
257 |
|
} |
258 |
|
|
259 |
|
/*--------------------------------------------------------------------- |
260 |
|
*/ |
261 |
|
|
262 |
|
/*
 * VDP init for ESI delivery: verify the object carries parsed ESI
 * instructions (OA_ESIDATA), allocate the per-delivery ESI context,
 * and flag the response for ESI processing.
 *
 * Returns 1 (refuse the VDP) when there is nothing to process,
 * 0 on success.
 */
static int v_matchproto_(vdp_init_f)
ved_vdp_esi_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, struct objcore *oc)
{
	struct ecx *ecx;
	struct req *req;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	if (oc == NULL || !ObjHasAttr(vdc->wrk, oc, OA_ESIDATA))
		return (1);

	req = vdc->req;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(priv);
	AZ(*priv);

	ALLOC_OBJ(ecx, ECX_MAGIC);
	AN(ecx);
	assert(sizeof gzip_hdr == 10);
	ecx->preq = req;
	*priv = ecx;
	RFC2616_Weaken_Etag(req->resp);

	req->res_mode |= RES_ESI;
	/* Length is unknown until all includes have been assembled */
	if (req->resp_len != 0)
		req->resp_len = -1;
	if (req->esi_level > 0) {
		/*
		 * Nested include: remember the gzip'ed parent's context
		 * (if any) so our CRC/length can be folded into it.
		 */
		assert(req->transport == &VED_transport);
		CAST_OBJ_NOTNULL(ecx->pecx, req->transport_priv, ECX_MAGIC);
		if (!ecx->pecx->isgzip)
			ecx->pecx = NULL;
	}

	return (0);
}
298 |
|
|
299 |
|
/* VDP fini: release the ESI context allocated in ved_vdp_esi_init(). */
static int v_matchproto_(vdp_fini_f)
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv)
{
	struct ecx *ecx;

	(void)vdc;
	TAKE_OBJ_NOTNULL(ecx, priv, ECX_MAGIC);
	FREE_OBJ(ecx);
	return (0);
}
309 |
|
|
310 |
|
/*
 * Interpret the stored ESI instruction vector (VEC) against the object
 * body bytes as they arrive: emit verbatim segments, skip removed
 * segments, and fire subrequests for includes.
 *
 * ecx->state: 0 = load the VEC, 1 = decode next instruction,
 * 2 = end of VEC (emit gzip tail or fold CRC into parent),
 * 3 = deliver ecx->l body bytes, 4 = skip ecx->l body bytes,
 * 99 = finished (trailing bytes are ignored).
 */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	const uint8_t *q, *r;
	ssize_t l = 0;
	uint32_t icrc = 0;
	uint8_t tailbuf[8 + 5];
	const uint8_t *pp;
	struct ecx *ecx;
	int retval = 0;

	/* The parent stream continues after us, so never pass VDP_END */
	if (act == VDP_END)
		act = VDP_FLUSH;

	AN(priv);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	pp = ptr;

	while (1) {
		switch (ecx->state) {
		case 0:
			/* First call: fetch the parsed ESI instructions */
			ecx->p = ObjGetAttr(vdc->wrk, ecx->preq->objcore,
			    OA_ESIDATA, &l);
			AN(ecx->p);
			assert(l > 0);
			ecx->e = ecx->p + l;

			if (*ecx->p == VEC_GZ) {
				/*
				 * Object is gzip'ed; emit the gzip header
				 * unless a gzip'ed parent already did.
				 */
				if (ecx->pecx == NULL)
					retval = VDP_bytes(vdc, VDP_NULL,
					    gzip_hdr, 10);
				ecx->l_crc = 0;
				ecx->crc = crc32(0L, Z_NULL, 0);
				ecx->isgzip = 1;
				ecx->p++;
			}
			ecx->state = 1;
			break;
		case 1:
			if (ecx->p >= ecx->e) {
				ecx->state = 2;
				break;
			}
			switch (*ecx->p) {
			case VEC_V1:
			case VEC_V2:
			case VEC_V8:
				/* Verbatim chunk, with CRC when gzip'ed */
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				if (ecx->isgzip) {
					assert(*ecx->p == VEC_C1 ||
					    *ecx->p == VEC_C2 ||
					    *ecx->p == VEC_C8);
					l = ved_decode_len(vdc->vsl, &ecx->p);
					if (l < 0)
						return (-1);
					icrc = vbe32dec(ecx->p);
					ecx->p += 4;
					ecx->crc = crc32_combine(
					    ecx->crc, icrc, l);
					ecx->l_crc += l;
				}
				ecx->state = 3;
				break;
			case VEC_S1:
			case VEC_S2:
			case VEC_S8:
				/* Skip chunk */
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				Debug("SKIP1(%d)\n", (int)ecx->l);
				ecx->state = 4;
				break;
			case VEC_IC:
				ecx->incl_cont =
				    FEATURE(FEATURE_ESI_INCLUDE_ONERROR);
				/* FALLTHROUGH */
			case VEC_IA:
				/*
				 * Include: two NUL-terminated strings
				 * follow — host header, then URL.
				 */
				ecx->p++;
				q = (void*)strchr((const char*)ecx->p, '\0');
				AN(q);
				q++;
				r = (void*)strchr((const char*)q, '\0');
				AN(r);
				if (VDP_bytes(vdc, VDP_FLUSH, NULL, 0)) {
					/* Downstream error: stop the VEC */
					ecx->p = ecx->e;
					break;
				}
				Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
				ved_include(ecx->preq,
				    (const char*)q, (const char*)ecx->p, ecx);
				Debug("INCL [%s][%s] END\n", q, ecx->p);
				ecx->p = r + 1;
				break;
			default:
				VSLb(vdc->vsl, SLT_Error,
				    "ESI corruption line %d 0x%02x [%s]\n",
				    __LINE__, *ecx->p, ecx->p);
				WRONG("ESI-codes: Illegal code");
			}
			break;
		case 2:
			ptr = NULL;
			len = 0;
			if (ecx->isgzip && ecx->pecx == NULL) {
				/*
				 * We are bytealigned here, so simply emit
				 * a gzip literal block with finish bit set.
				 */
				tailbuf[0] = 0x01;
				tailbuf[1] = 0x00;
				tailbuf[2] = 0x00;
				tailbuf[3] = 0xff;
				tailbuf[4] = 0xff;

				/* Emit CRC32 */
				vle32enc(tailbuf + 5, ecx->crc);

				/* MOD(2^32) length */
				vle32enc(tailbuf + 9, ecx->l_crc);

				ptr = tailbuf;
				len = 13;
			} else if (ecx->pecx != NULL) {
				/* Nested gzip: fold into the parent's CRC */
				ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
				    ecx->crc, ecx->l_crc);
				ecx->pecx->l_crc += ecx->l_crc;
			}
			retval = VDP_bytes(vdc, VDP_END, ptr, len);
			ecx->state = 99;
			return (retval);
		case 3:
		case 4:
			/*
			 * There is no guarantee that the 'l' bytes are all
			 * in the same storage segment, so loop over storage
			 * until we have processed them all.
			 */
			if (ecx->l <= len) {
				if (ecx->state == 3)
					retval = VDP_bytes(vdc, act,
					    pp, ecx->l);
				len -= ecx->l;
				pp += ecx->l;
				ecx->state = 1;
				break;
			}
			if (ecx->state == 3 && len > 0)
				retval = VDP_bytes(vdc, act, pp, len);
			ecx->l -= len;
			return (retval);
		case 99:
			/*
			 * VEP does not account for the PAD+CRC+LEN
			 * so we can see up to approx 15 bytes here.
			 */
			return (retval);
		default:
			WRONG("FOO");
			break;
		}
		if (retval)
			return (retval);
	}
}
478 |
|
|
479 |
|
/* The ESI-interpreting delivery processor */
const struct vdp VDP_esi = {
	.name =		"esi",
	.init =		ved_vdp_esi_init,
	.bytes =	ved_vdp_esi_bytes,
	.fini =		ved_vdp_esi_fini,
};
485 |
|
|
486 |
|
/* |
487 |
|
* Account body bytes on req |
488 |
|
* Push bytes to preq |
489 |
|
*/ |
490 |
|
static inline int |
491 |
55125 |
ved_bytes(struct ecx *ecx, enum vdp_action act, |
492 |
|
const void *ptr, ssize_t len) |
493 |
|
{ |
494 |
55125 |
if (act == VDP_END) |
495 |
9250 |
act = VDP_FLUSH; |
496 |
55125 |
return (VDP_bytes(ecx->preq->vdc, act, ptr, len)); |
497 |
|
} |
498 |
|
|
499 |
|
/*--------------------------------------------------------------------- |
500 |
|
* If a gzip'ed ESI object includes a ungzip'ed object, we need to make |
501 |
|
* it looked like a gzip'ed data stream. The official way to do so would |
502 |
|
* be to fire up libvgz and gzip it, but we don't, we fake it. |
503 |
|
* |
504 |
|
* First, we cannot know if it is ungzip'ed on purpose, the admin may |
505 |
|
* know something we don't. |
506 |
|
* |
507 |
|
* What do you mean "BS ?" |
508 |
|
* |
509 |
|
* All right then... |
510 |
|
* |
511 |
|
* The matter of the fact is that we simply will not fire up a gzip in |
512 |
|
* the output path because it costs too much memory and CPU, so we simply |
513 |
|
* wrap the data in very convenient "gzip copy-blocks" and send it down |
514 |
|
* the stream with a bit more overhead. |
515 |
|
*/ |
516 |
|
|
517 |
|
/* PGZ fini: nothing was allocated, just clear the priv pointer. */
static int v_matchproto_(vdp_fini_f)
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv)
{
	(void)vdc;
	*priv = NULL;
	return (0);
}
524 |
|
|
525 |
|
static int v_matchproto_(vdp_bytes_f) |
526 |
1250 |
ved_pretend_gzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
527 |
|
const void *pv, ssize_t l) |
528 |
|
{ |
529 |
|
uint8_t buf1[5], buf2[5]; |
530 |
|
const uint8_t *p; |
531 |
|
uint16_t lx; |
532 |
|
struct ecx *ecx; |
533 |
|
|
534 |
1250 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
535 |
1250 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
536 |
|
|
537 |
1250 |
(void)priv; |
538 |
1250 |
if (l == 0) |
539 |
250 |
return (ved_bytes(ecx, act, pv, l)); |
540 |
|
|
541 |
1000 |
p = pv; |
542 |
|
|
543 |
1000 |
AN (ecx->isgzip); |
544 |
1000 |
ecx->crc = crc32(ecx->crc, p, l); |
545 |
1000 |
ecx->l_crc += l; |
546 |
|
|
547 |
|
/* |
548 |
|
* buf1 can safely be emitted multiple times for objects longer |
549 |
|
* than 64K-1 bytes. |
550 |
|
*/ |
551 |
1000 |
lx = 65535; |
552 |
1000 |
buf1[0] = 0; |
553 |
1000 |
vle16enc(buf1 + 1, lx); |
554 |
1000 |
vle16enc(buf1 + 3, ~lx); |
555 |
|
|
556 |
2000 |
while (l > 0) { |
557 |
1000 |
if (l >= 65535) { |
558 |
0 |
lx = 65535; |
559 |
0 |
if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1)) |
560 |
0 |
return (-1); |
561 |
0 |
} else { |
562 |
1000 |
lx = (uint16_t)l; |
563 |
1000 |
buf2[0] = 0; |
564 |
1000 |
vle16enc(buf2 + 1, lx); |
565 |
1000 |
vle16enc(buf2 + 3, ~lx); |
566 |
1000 |
if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2)) |
567 |
0 |
return (-1); |
568 |
|
} |
569 |
1000 |
if (ved_bytes(ecx, VDP_NULL, p, lx)) |
570 |
0 |
return (-1); |
571 |
1000 |
l -= lx; |
572 |
1000 |
p += lx; |
573 |
|
} |
574 |
|
/* buf1 & buf2 are local, so we have to flush */ |
575 |
1000 |
return (ved_bytes(ecx, VDP_FLUSH, NULL, 0)); |
576 |
1250 |
} |
577 |
|
|
578 |
|
/* Wraps plain includes as stored deflate blocks for gzip'ed parents */
static const struct vdp ved_pretend_gz = {
	.name =		"PGZ",
	.bytes =	ved_pretend_gzip_bytes,
	.fini =		ved_pretend_gzip_fini,
};
583 |
|
|
584 |
|
/*--------------------------------------------------------------------- |
585 |
|
* Include a gzip'ed object in a gzip'ed ESI object delivery |
586 |
|
* |
587 |
|
* This is the interesting case: Deliver all the deflate blocks, stripping |
588 |
|
* the "LAST" bit of the last one and padding it, as necessary, to a byte |
589 |
|
* boundary. |
590 |
|
* |
591 |
|
*/ |
592 |
|
|
593 |
|
/*
 * State for splicing a gzip'ed include into a gzip'ed parent stream
 * (ved_gzgz_*): deflate stream bit offsets recovered from OA_GZIPBITS,
 * plus scratch space for bit-fiddled boundary bytes and the gzip tail.
 */
struct ved_foo {
	unsigned		magic;
#define VED_FOO_MAGIC		0x6a5a262d
	struct ecx		*ecx;		/* parent ESI context */
	struct objcore		*objcore;	/* the included object */
	uint64_t		start, last, stop, lpad;
			/* start/last/stop: bit offsets of first deflate
			 * block, last block, and stream end (see init);
			 * lpad: length of the padding in dbits[1..] */
	ssize_t			ll;		/* object bytes consumed so far */
	uint64_t		olen;		/* total object length */
	uint8_t			dbits[8];	/* scratch for boundary bytes */
	uint8_t			tailbuf[8];	/* gzip tail: CRC32 + ISIZE */
};
604 |
|
|
605 |
|
/*
 * VDP init for including a gzip'ed object in a gzip'ed parent: load
 * the deflate bit offsets recorded by the fetch side in OA_GZIPBITS
 * and sanity-check them against the object length.
 */
static int v_matchproto_(vdp_init_f)
ved_gzgz_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, struct objcore *oc)
{
	ssize_t l;
	const char *p;
	struct ved_foo *foo;
	struct req *req;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	(void)oc;
	req = vdc->req;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC);

	/* Poison the tailbuf so bytes never filled in are detectable */
	memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf);

	AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED));

	p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	foo->start = vbe64dec(p);	/* bit offset: first deflate block */
	foo->last = vbe64dec(p + 8);	/* bit offset: last deflate block */
	foo->stop = vbe64dec(p + 16);	/* bit offset: end of stream */
	foo->olen = ObjGetLen(vdc->wrk, foo->objcore);
	assert(foo->start > 0 && foo->start < foo->olen * 8);
	assert(foo->last > 0 && foo->last < foo->olen * 8);
	assert(foo->stop > 0 && foo->stop < foo->olen * 8);
	assert(foo->last >= foo->start);
	assert(foo->last < foo->stop);

	/* The start bit must be byte aligned. */
	AZ(foo->start & 7);
	return (0);
}
642 |
|
|
643 |
|
/* |
644 |
|
* XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than |
645 |
|
* we need. The VDP_END case would trip our "at most one VDP_END call" assertion |
646 |
|
* in VDP_bytes(), but ved_bytes() covers it. |
647 |
|
* |
648 |
|
* To avoid unnecessary chunks downstream, it would be nice to re-structure the |
649 |
|
* code to identify the last block, send VDP_END/VDP_FLUSH for that one and |
650 |
|
* VDP_NULL for anything before it. |
651 |
|
*/ |
652 |
|
|
653 |
|
/*
 * Deliver a gzip'ed object into a gzip'ed parent stream: skip the gzip
 * header, pass the deflate blocks through, clear the LAST bit of the
 * final block, pad to a byte boundary with empty deflate blocks, and
 * capture the 8-byte gzip tail (CRC32 + ISIZE) into foo->tailbuf for
 * ved_gzgz_fini() to fold into the parent's CRC.
 *
 * The if-ladders each advance (pp, len, foo->ll) through consecutive
 * regions of the object; any one call may see bytes from several
 * regions, or only part of one.
 */
static int v_matchproto_(vdp_bytes_f)
ved_gzgz_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ved_foo *foo;
	const uint8_t *pp;
	ssize_t dl;
	ssize_t l;

	(void)vdc;
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	pp = ptr;
	if (len > 0) {
		/* Skip over the GZIP header */
		dl = foo->start / 8 - foo->ll;
		if (dl > 0) {
			/* Before foo.start, skip */
			if (dl > len)
				dl = len;
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0) {
		/* The main body of the object */
		dl = foo->last / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && foo->ll == foo->last / 8) {
		/* Remove the "LAST" bit */
		foo->dbits[0] = *pp;
		foo->dbits[0] &= ~(1U << (foo->last & 7));
		if (ved_bytes(foo->ecx, act, foo->dbits, 1))
			return (-1);
		foo->ll++;
		len--;
		pp++;
	}
	if (len > 0) {
		/* Last block */
		dl = foo->stop / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) {
		/*
		 * Add alignment to byte boundary: depending on where the
		 * stream stops within the byte, append empty deflate
		 * blocks whose bit patterns are tabulated below.
		 */
		foo->dbits[1] = *pp;
		foo->ll++;
		len--;
		pp++;
		switch ((int)(foo->stop & 7)) {
		case 1: /*
			 * x000....
			 * 00000000 00000000 11111111 11111111
			 */
		case 3: /*
			 * xxx000..
			 * 00000000 00000000 11111111 11111111
			 */
		case 5: /*
			 * xxxxx000
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00; foo->dbits[3] = 0x00;
			foo->dbits[4] = 0xff; foo->dbits[5] = 0xff;
			foo->lpad = 5;
			break;
		case 2: /* xx010000 00000100 00000001 00000000 */
			foo->dbits[1] |= 0x08;
			foo->dbits[2] = 0x20;
			foo->dbits[3] = 0x80;
			foo->dbits[4] = 0x00;
			foo->lpad = 4;
			break;
		case 4: /* xxxx0100 00000001 00000000 */
			foo->dbits[1] |= 0x20;
			foo->dbits[2] = 0x80;
			foo->dbits[3] = 0x00;
			foo->lpad = 3;
			break;
		case 6: /* xxxxxx01 00000000 */
			foo->dbits[1] |= 0x80;
			foo->dbits[2] = 0x00;
			foo->lpad = 2;
			break;
		case 7: /*
			 * xxxxxxx0
			 * 00......
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00;
			foo->dbits[3] = 0x00; foo->dbits[4] = 0x00;
			foo->dbits[5] = 0xff; foo->dbits[6] = 0xff;
			foo->lpad = 6;
			break;
		case 0: /* xxxxxxxx */
		default:
			/* stop & 7 is nonzero here by the if() above */
			WRONG("compiler must be broken");
		}
		if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad))
			return (-1);
	}
	if (len > 0) {
		/* Recover GZIP tail */
		dl = foo->olen - foo->ll;
		assert(dl >= 0);
		if (dl > len)
			dl = len;
		if (dl > 0) {
			assert(dl <= 8);
			l = foo->ll - (foo->olen - 8);
			assert(l >= 0);
			assert(l <= 8);
			assert(l + dl <= 8);
			memcpy(foo->tailbuf + l, pp, dl);
			foo->ll += dl;
			len -= dl;
		}
	}
	assert(len == 0);
	return (0);
}
789 |
|
|
790 |
|
/*
 * Flush any pending bytes, then fold the included object's CRC32 and
 * length (recovered from its gzip tail in ved_gzgz_bytes()) into the
 * parent's running CRC.
 */
static int v_matchproto_(vdp_fini_f)
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv)
{
	uint32_t icrc;
	uint32_t ilen;
	struct ved_foo *foo;

	(void)vdc;
	TAKE_OBJ_NOTNULL(foo, priv, VED_FOO_MAGIC);

	/* XXX
	 * this works due to the esi layering, a VDP pushing bytes from _fini
	 * will otherwise have it's own _bytes method called.
	 *
	 * Could rewrite use VDP_END
	 */
	(void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0);

	icrc = vle32dec(foo->tailbuf);
	ilen = vle32dec(foo->tailbuf + 4);
	foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen);
	foo->ecx->l_crc += ilen;

	return (0);
}
815 |
|
|
816 |
|
/* Splices gzip'ed includes into a gzip'ed parent stream */
static const struct vdp ved_gzgz = {
	.name =		"VZZ",
	.init =		ved_gzgz_init,
	.bytes =	ved_gzgz_bytes,
	.fini =		ved_gzgz_fini,
};
822 |
|
|
823 |
|
/*-------------------------------------------------------------------- |
824 |
|
* Straight through without processing. |
825 |
|
*/ |
826 |
|
|
827 |
|
/* Straight-through VDP fini: nothing to clean up, drop the priv link. */
static int v_matchproto_(vdp_fini_f)
ved_vdp_fini(struct vdp_ctx *vdc, void **priv)
{
	(void)vdc;
	*priv = NULL;
	return (0);
}
834 |
|
|
835 |
|
/* Straight-through VDP: forward bytes unmodified into the parent request. */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ecx *ecx;

	(void)vdc;
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	return (ved_bytes(ecx, act, ptr, len));
}
845 |
|
|
846 |
|
/* Pass-through delivery into the parent request */
static const struct vdp ved_ved = {
	.name =		"VED",
	.bytes =	ved_vdp_bytes,
	.fini =		ved_vdp_fini,
};
851 |
|
|
852 |
|
/*--------------------------------------------------------------------*/ |
853 |
|
|
854 |
|
/*
 * Transport deliver method for ESI includes: pick the delivery
 * processor matching the gzip state of parent and fragment, then
 * deliver the object body into the parent's VDP pipeline.
 *
 * Failures are propagated to the topmost request (aborting the whole
 * ESI response) unless esi_include_onerror continuation was requested.
 */
static void v_matchproto_(vtr_deliver_f)
ved_deliver(struct req *req, struct boc *boc, int wantbody)
{
	int i = 0;
	const char *p;
	uint16_t status;
	struct ecx *ecx;
	struct ved_foo foo[1];
	struct vrt_ctx ctx[1];

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	if (wantbody == 0)
		return;

	status = req->resp->status % 1000;

	if (!ecx->incl_cont && status != 200 && status != 204) {
		/* Error fragment: fail the whole ESI response */
		req->top->topreq->vdc->retval = -1;
		req->top->topreq->doclose = req->doclose;
		return;
	}

	if (boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0)
		return;

	/* i != 0: fragment is declared and flagged as gzip'ed */
	if (http_GetHdr(req->resp, H_Content_Encoding, &p))
		i = http_coding_eq(p, gzip);
	if (i)
		i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Req2Ctx(ctx, req);

	if (ecx->isgzip && i && !(req->res_mode & RES_ESI)) {
		/* A gzip'ed include which is not ESI processed */

		/* OA_GZIPBITS are not valid until BOS_FINISHED */
		if (boc != NULL)
			ObjWaitState(req->objcore, BOS_FINISHED);

		if (req->objcore->flags & OC_F_FAILED) {
			/* No way of signalling errors in the middle of
			   the ESI body. Omit this ESI fragment. */
			return;
		}

		INIT_OBJ(foo, VED_FOO_MAGIC);
		foo->ecx = ecx;
		foo->objcore = req->objcore;
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_gzgz, foo);

	} else if (ecx->isgzip && !i) {
		/* Non-Gzip'ed include in gzip'ed parent */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_pretend_gz, ecx);
	} else {
		/* Anything else goes straight through */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_ved, ecx);
	}

	if (i == 0) {
		i = VDP_DeliverObj(req->vdc, req->objcore);
	} else {
		VSLb(req->vsl, SLT_Error, "Failure to push ESI processors");
		req->doclose = SC_OVERLOAD;
	}

	if (i && req->doclose == SC_NULL)
		req->doclose = SC_REM_CLOSE;

	req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, boc);

	if (i && !ecx->incl_cont) {
		/* Delivery failed: abort the topmost ESI response */
		req->top->topreq->vdc->retval = -1;
		req->top->topreq->doclose = req->doclose;
	}
}
} |