varnish-cache/bin/varnishd/cache/cache_esi_deliver.c
/*-
 * Copyright (c) 2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * VED - Varnish Esi Delivery
 */

#include "config.h"

#include "cache_varnishd.h"

#include <stdlib.h>

#include "cache_transport.h"
#include "cache_filter.h"
#include "cache_vgz.h"

#include "vct.h"
#include "vtim.h"
#include "cache_esi.h"
#include "vend.h"
#include "vgz.h"

static vtr_deliver_f ved_deliver;
static vtr_reembark_f ved_reembark;

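/*
 * Canned gzip file header: magic 0x1f 0x8b, CM=8 (deflate), no flags,
 * zero mtime, XFL=2, OS=3 (Unix).  Emitted in front of the output when
 * the ESI object is gzipped and there is no gzipped parent to merge into.
 */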
static const uint8_t gzip_hdr[] = {
        0x1f, 0x8b, 0x08,
        0x00, 0x00, 0x00, 0x00,
        0x00,
        0x02, 0x03
};

struct ecx {
        unsigned        magic;
#define ECX_MAGIC       0x0b0f9163
        const uint8_t   *p;
        const uint8_t   *e;
        int             state;
        ssize_t         l;
        int             isgzip;
        int             woken;
        int             abrt;

        struct req      *preq;
        struct ecx      *pecx;
        ssize_t         l_crc;
        uint32_t        crc;
};
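
/*
 * The 'state' field drives ved_vdp_esi_bytes():
 *   0   fetch the OA_ESIDATA instructions, emit gzip_hdr if applicable
 *   1   interpret the next VEC instruction
 *   2   end of instructions, emit gzip trailer or fold CRC into parent
 *   3   pass the next ecx->l object bytes through verbatim
 *   4   skip the next ecx->l object bytes
 *  99   done, ignore any trailing padding/CRC/length bytes
 */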

static int v_matchproto_(vtr_minimal_response_f)
ved_minimal_response(struct req *req, uint16_t status)
{
        (void)req;
        (void)status;
        WRONG("esi:includes should not try minimal responses");
}

static const struct transport VED_transport = {
        .magic =                TRANSPORT_MAGIC,
        .name =                 "ESI_INCLUDE",
        .deliver =              ved_deliver,
        .reembark =             ved_reembark,
        .minimal_response =     ved_minimal_response,
};

/*--------------------------------------------------------------------*/

static void v_matchproto_(vtr_reembark_f)
ved_reembark(struct worker *wrk, struct req *req)
{
        struct ecx *ecx;

        (void)wrk;
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);
        Lck_Lock(&req->sp->mtx);
        ecx->woken = 1;
        PTOK(pthread_cond_signal(&ecx->preq->wrk->cond));
        Lck_Unlock(&req->sp->mtx);
}

/*--------------------------------------------------------------------*/

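/*
 * Synthesize and run a sub-request for an ESI include.  The child request
 * reuses the parent's session and runs on the parent's worker; if it has
 * to disembark onto a waiting list, the parent blocks on its worker
 * condvar until ved_reembark() signals it.
 */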
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
        struct worker *wrk;
        struct sess *sp;
        struct req *req;
        enum req_fsm_nxt s;

        CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC);
        sp = preq->sp;
        CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
        CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
        wrk = preq->wrk;

        if (preq->esi_level >= cache_param->max_esi_depth) {
                VSLb(preq->vsl, SLT_VCL_Error,
                    "ESI depth limit reached (param max_esi_depth = %u)",
                    cache_param->max_esi_depth);
                if (ecx->abrt)
                        preq->top->topreq->vdc->retval = -1;
                return;
        }

        req = Req_New(sp, preq);
        AN(req);
        THR_SetRequest(req);
        assert(IS_NO_VXID(req->vsl->wid));
        req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

        wrk->stats->esi_req++;
        req->esi_level = preq->esi_level + 1;

        VSLb(req->vsl, SLT_Begin, "req %ju esi %u",
            (uintmax_t)VXID(preq->vsl->wid), req->esi_level);
        VSLb(preq->vsl, SLT_Link, "req %ju esi %u",
            (uintmax_t)VXID(req->vsl->wid), req->esi_level);

        VSLb_ts_req(req, "Start", W_TIM_real(wrk));

        HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
        HTTP_Dup(req->http, preq->http0);

        http_SetH(req->http, HTTP_HDR_URL, src);
        if (host != NULL && *host != '\0')  {
                http_Unset(req->http, H_Host);
                http_SetHeader(req->http, host);
        }

        http_ForceField(req->http, HTTP_HDR_METHOD, "GET");
        http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1");

        /* Don't allow conditionals, we can't use a 304 */
        http_Unset(req->http, H_If_Modified_Since);
        http_Unset(req->http, H_If_None_Match);

        /* Don't allow Range */
        http_Unset(req->http, H_Range);

        /* Set Accept-Encoding according to what we want */
        if (ecx->isgzip)
                http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
        else
                http_Unset(req->http, H_Accept_Encoding);

        /* Client content already taken care of */
        http_Unset(req->http, H_Content_Length);
        http_Unset(req->http, H_Transfer_Encoding);
        req->req_body_status = BS_NONE;

        AZ(req->vcl);
        assert(req->top == preq->top);
        if (req->top->vcl0)
                req->vcl = req->top->vcl0;
        else
                req->vcl = preq->vcl;
        VCL_Ref(req->vcl);

        assert(req->req_step == R_STP_TRANSPORT);
        req->t_req = preq->t_req;

        req->transport = &VED_transport;
        req->transport_priv = ecx;

        VCL_TaskEnter(req->privs);

        while (1) {
                CNT_Embark(wrk, req);
                ecx->woken = 0;
                s = CNT_Request(req);
                if (s == REQ_FSM_DONE)
                        break;
                DSL(DBG_WAITINGLIST, req->vsl->wid,
                    "waiting for ESI (%d)", (int)s);
                assert(s == REQ_FSM_DISEMBARK);
                Lck_Lock(&sp->mtx);
                if (!ecx->woken)
                        (void)Lck_CondWait(&ecx->preq->wrk->cond, &sp->mtx);
                Lck_Unlock(&sp->mtx);
                AZ(req->wrk);
        }

        VCL_Rel(&req->vcl);

        req->wrk = NULL;
        THR_SetRequest(preq);

        Req_Cleanup(sp, wrk, req);
        Req_Release(req);
}

/*--------------------------------------------------------------------*/

//#define Debug(fmt, ...) printf(fmt, __VA_ARGS__)
#define Debug(fmt, ...) /**/

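/*
 * VEC length arguments: the low nibble of the opcode byte selects the
 * encoding of the length that follows it, as one byte, two bytes (vbe16)
 * or eight bytes (vbe64), big-endian.
 */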
static ssize_t
ved_decode_len(struct vsl_log *vsl, const uint8_t **pp)
{
        const uint8_t *p;
        ssize_t l;

        p = *pp;
        switch (*p & 15) {
        case 1:
                l = p[1];
                p += 2;
                break;
        case 2:
                l = vbe16dec(p + 1);
                p += 3;
                break;
        case 8:
                l = vbe64dec(p + 1);
                p += 9;
                break;
        default:
                VSLb(vsl, SLT_Error,
                    "ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15));
                WRONG("ESI-codes: illegal length");
        }
        *pp = p;
        assert(l > 0);
        return (l);
}

/*---------------------------------------------------------------------
 */

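/*
 * Set up the ESI VDP.  It bails out early when the object carries no
 * OA_ESIDATA attribute or when there is no client request context;
 * otherwise it allocates the ecx, weakens the ETag, marks the response
 * as ESI-processed and invalidates the Content-Length.
 */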
static int v_matchproto_(vdp_init_f)
ved_vdp_esi_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
        struct ecx *ecx;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
        AN(vdc->clen);
        AN(priv);

        AZ(*priv);
        if (vdc->oc == NULL || !ObjHasAttr(vdc->wrk, vdc->oc, OA_ESIDATA))
                return (1);

        if (ctx->req == NULL) {
                VSLb(vdc->vsl, SLT_Error,
                     "esi can only be used on the client side");
                return (1);
        }

        ALLOC_OBJ(ecx, ECX_MAGIC);
        AN(ecx);
        assert(sizeof gzip_hdr == 10);
        ecx->preq = ctx->req;
        *priv = ecx;
        RFC2616_Weaken_Etag(vdc->hp);

        ctx->req->res_esi = 1;
        if (*vdc->clen != 0)
                *vdc->clen = -1;
        if (ctx->req->esi_level > 0) {
                assert(ctx->req->transport == &VED_transport);
                CAST_OBJ_NOTNULL(ecx->pecx, ctx->req->transport_priv, ECX_MAGIC);
                if (!ecx->pecx->isgzip)
                        ecx->pecx = NULL;
        }

        return (0);
}

static int v_matchproto_(vdp_fini_f)
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv)
{
        struct ecx *ecx;

        (void)vdc;
        TAKE_OBJ_NOTNULL(ecx, priv, ECX_MAGIC);
        FREE_OBJ(ecx);
        return (0);
}

static int v_matchproto_(vdp_bytes_f)
ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
        const uint8_t *q, *r;
        ssize_t l = 0;
        uint32_t icrc = 0;
        uint8_t tailbuf[8 + 5];
        const uint8_t *pp;
        struct ecx *ecx;
        int retval = 0;

        if (act == VDP_END)
                act = VDP_FLUSH;

        AN(priv);
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
        pp = ptr;

        while (1) {
                switch (ecx->state) {
                case 0:
                        ecx->p = ObjGetAttr(vdc->wrk, ecx->preq->objcore,
                            OA_ESIDATA, &l);
                        AN(ecx->p);
                        assert(l > 0);
                        ecx->e = ecx->p + l;

                        if (*ecx->p == VEC_GZ) {
                                if (ecx->pecx == NULL)
                                        retval = VDP_bytes(vdc, VDP_NULL,
                                            gzip_hdr, 10);
                                ecx->l_crc = 0;
                                ecx->crc = crc32(0L, Z_NULL, 0);
                                ecx->isgzip = 1;
                                ecx->p++;
                        }
                        ecx->state = 1;
                        break;
                case 1:
                        if (ecx->p >= ecx->e) {
                                ecx->state = 2;
                                break;
                        }
                        switch (*ecx->p) {
                        case VEC_V1:
                        case VEC_V2:
                        case VEC_V8:
                                ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
                                if (ecx->l < 0)
                                        return (-1);
                                if (ecx->isgzip) {
                                        assert(*ecx->p == VEC_C1 ||
                                            *ecx->p == VEC_C2 ||
                                            *ecx->p == VEC_C8);
                                        l = ved_decode_len(vdc->vsl, &ecx->p);
                                        if (l < 0)
                                                return (-1);
                                        icrc = vbe32dec(ecx->p);
                                        ecx->p += 4;
                                        ecx->crc = crc32_combine(
                                            ecx->crc, icrc, l);
                                        ecx->l_crc += l;
                                }
                                ecx->state = 3;
                                break;
                        case VEC_S1:
                        case VEC_S2:
                        case VEC_S8:
                                ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
                                if (ecx->l < 0)
                                        return (-1);
                                Debug("SKIP1(%d)\n", (int)ecx->l);
                                ecx->state = 4;
                                break;
                        case VEC_IA:
                                ecx->abrt =
                                    FEATURE(FEATURE_ESI_INCLUDE_ONERROR);
                                /* FALLTHROUGH */
                        case VEC_IC:
                                ecx->p++;
                                q = (void*)strchr((const char*)ecx->p, '\0');
                                AN(q);
                                q++;
                                r = (void*)strchr((const char*)q, '\0');
                                AN(r);
                                if (VDP_bytes(vdc, VDP_FLUSH, NULL, 0)) {
                                        ecx->p = ecx->e;
                                        break;
                                }
                                Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
                                ved_include(ecx->preq,
                                    (const char*)q, (const char*)ecx->p, ecx);
                                Debug("INCL [%s][%s] END\n", q, ecx->p);
                                ecx->p = r + 1;
                                break;
                        default:
                                VSLb(vdc->vsl, SLT_Error,
                                    "ESI corruption line %d 0x%02x [%s]\n",
                                    __LINE__, *ecx->p, ecx->p);
                                WRONG("ESI-codes: Illegal code");
                        }
                        break;
                case 2:
                        ptr = NULL;
                        len = 0;
                        if (ecx->isgzip && ecx->pecx == NULL) {
                                /*
                                 * We are bytealigned here, so simply emit
                                 * a gzip literal block with finish bit set.
                                 */
                                tailbuf[0] = 0x01;
                                tailbuf[1] = 0x00;
                                tailbuf[2] = 0x00;
                                tailbuf[3] = 0xff;
                                tailbuf[4] = 0xff;

                                /* Emit CRC32 */
                                vle32enc(tailbuf + 5, ecx->crc);

                                /* MOD(2^32) length */
                                vle32enc(tailbuf + 9, ecx->l_crc);

                                ptr = tailbuf;
                                len = 13;
                        } else if (ecx->pecx != NULL) {
                                ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
                                    ecx->crc, ecx->l_crc);
                                ecx->pecx->l_crc += ecx->l_crc;
                        }
                        retval = VDP_bytes(vdc, VDP_END, ptr, len);
                        ecx->state = 99;
                        return (retval);
                case 3:
                case 4:
                        /*
                         * There is no guarantee that the 'l' bytes are all
                         * in the same storage segment, so loop over storage
                         * until we have processed them all.
                         */
                        if (ecx->l <= len) {
                                if (ecx->state == 3)
                                        retval = VDP_bytes(vdc, act,
                                            pp, ecx->l);
                                len -= ecx->l;
                                pp += ecx->l;
                                ecx->state = 1;
                                break;
                        }
                        if (ecx->state == 3 && len > 0)
                                retval = VDP_bytes(vdc, act, pp, len);
                        ecx->l -= len;
                        return (retval);
                case 99:
                        /*
                         * VEP does not account for the PAD+CRC+LEN
                         * so we can see up to approx 15 bytes here.
                         */
                        return (retval);
                default:
                        WRONG("FOO");
                        break;
                }
                if (retval)
                        return (retval);
        }
}

const struct vdp VDP_esi = {
        .name =         "esi",
        .init =         ved_vdp_esi_init,
        .bytes =        ved_vdp_esi_bytes,
        .fini =         ved_vdp_esi_fini,
};

/*
 * Account body bytes on req
 * Push bytes to preq
 */
static inline int
ved_bytes(struct ecx *ecx, enum vdp_action act,
    const void *ptr, ssize_t len)
{
        if (act == VDP_END)
                act = VDP_FLUSH;
        return (VDP_bytes(ecx->preq->vdc, act, ptr, len));
}

/*---------------------------------------------------------------------
 * If a gzipped ESI object includes an ungzipped object, we need to make
 * it look like a gzipped data stream.  The official way to do so would
 * be to fire up libvgz and gzip it, but we don't, we fake it.
 *
 * First, we cannot know if it is ungzipped on purpose, the admin may
 * know something we don't.
 *
 * What do you mean "BS ?"
 *
 * All right then...
 *
 * The fact of the matter is that we simply will not fire up a gzip in
 * the output path because it costs too much memory and CPU, so we simply
 * wrap the data in very convenient "gzip copy-blocks" and send it down
 * the stream with a bit more overhead.
 */
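
/*
 * A "copy-block" is a deflate stored block: a zero header byte (BFINAL=0,
 * BTYPE=00 stored), then LEN and ~LEN as little-endian 16-bit values,
 * followed by up to 65535 literal bytes.  The CRC32 and length of the
 * wrapped data are accumulated on the ecx so the enclosing gzip trailer
 * stays correct.
 */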

static int v_matchproto_(vdp_fini_f)
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv)
{
        (void)vdc;
        *priv = NULL;
        return (0);
}

static int v_matchproto_(vdp_bytes_f)
ved_pretend_gzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *pv, ssize_t l)
{
        uint8_t buf1[5], buf2[5];
        const uint8_t *p;
        uint16_t lx;
        struct ecx *ecx;

        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);

        (void)priv;
        if (l == 0)
                return (ved_bytes(ecx, act, pv, l));

        p = pv;

        AN (ecx->isgzip);
        ecx->crc = crc32(ecx->crc, p, l);
        ecx->l_crc += l;

        /*
         * buf1 can safely be emitted multiple times for objects longer
         * than 64K-1 bytes.
         */
        lx = 65535;
        buf1[0] = 0;
        vle16enc(buf1 + 1, lx);
        vle16enc(buf1 + 3, ~lx);

        while (l > 0) {
                if (l >= 65535) {
                        lx = 65535;
                        if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1))
                                return (-1);
                } else {
                        lx = (uint16_t)l;
                        buf2[0] = 0;
                        vle16enc(buf2 + 1, lx);
                        vle16enc(buf2 + 3, ~lx);
                        if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2))
                                return (-1);
                }
                if (ved_bytes(ecx, VDP_NULL, p, lx))
                        return (-1);
                l -= lx;
                p += lx;
        }
        /* buf1 & buf2 are local, so we have to flush */
        return (ved_bytes(ecx, VDP_FLUSH, NULL, 0));
}

static const struct vdp ved_pretend_gz = {
        .name =         "PGZ",
        .bytes =        ved_pretend_gzip_bytes,
        .fini =         ved_pretend_gzip_fini,
};

/*---------------------------------------------------------------------
 * Include a gzipped object in a gzipped ESI object delivery
 *
 * This is the interesting case: Deliver all the deflate blocks, stripping
 * the "LAST" bit of the last one and padding it, as necessary, to a byte
 * boundary.
 *
 */

struct ved_foo {
        unsigned                magic;
#define VED_FOO_MAGIC           0x6a5a262d
        struct ecx              *ecx;
        struct objcore          *objcore;
        uint64_t                start, last, stop, lpad;
        ssize_t                 ll;
        uint64_t                olen;
        uint8_t                 dbits[8];
        uint8_t                 tailbuf[8];
};
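
/*
 * The OA_GZIPBITS attribute gives bit offsets into the stored gzip object:
 * 'start' is the first bit of deflate data after the gzip header, 'last'
 * is the position of the BFINAL bit of the last deflate block and 'stop'
 * is the first bit past the deflate data, after which the byte-aligned
 * CRC32/ISIZE trailer follows.  'll' counts input bytes consumed so far.
 */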

static int v_matchproto_(vdp_init_f)
ved_gzgz_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
        ssize_t l;
        const char *p;
        struct ved_foo *foo;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        AN(priv);

        CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
        CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC);

        memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf);

        AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED));

        p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l);
        AN(p);
        assert(l == 32);
        foo->start = vbe64dec(p);
        foo->last = vbe64dec(p + 8);
        foo->stop = vbe64dec(p + 16);
        foo->olen = ObjGetLen(vdc->wrk, foo->objcore);
        assert(foo->start > 0 && foo->start < foo->olen * 8);
        assert(foo->last > 0 && foo->last < foo->olen * 8);
        assert(foo->stop > 0 && foo->stop < foo->olen * 8);
        assert(foo->last >= foo->start);
        assert(foo->last < foo->stop);

        /* The start bit must be byte aligned. */
        AZ(foo->start & 7);
        return (0);
}

/*
 * XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than
 * we need. The VDP_END case would trip our "at most one VDP_END call" assertion
 * in VDP_bytes(), but ved_bytes() covers it.
 *
 * To avoid unnecessary chunks downstream, it would be nice to re-structure the
 * code to identify the last block, send VDP_END/VDP_FLUSH for that one and
 * VDP_NULL for anything before it.
 */

static int v_matchproto_(vdp_bytes_f)
ved_gzgz_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
        struct ved_foo *foo;
        const uint8_t *pp;
        ssize_t dl;
        ssize_t l;

        (void)vdc;
        CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
        pp = ptr;
        if (len > 0) {
                /* Skip over the GZIP header */
                dl = foo->start / 8 - foo->ll;
                if (dl > 0) {
                        /* Before foo.start, skip */
                        if (dl > len)
                                dl = len;
                        foo->ll += dl;
                        len -= dl;
                        pp += dl;
                }
        }
        if (len > 0) {
                /* The main body of the object */
                dl = foo->last / 8 - foo->ll;
                if (dl > 0) {
                        dl = vmin(dl, len);
                        if (ved_bytes(foo->ecx, act, pp, dl))
                                return (-1);
                        foo->ll += dl;
                        len -= dl;
                        pp += dl;
                }
        }
        if (len > 0 && foo->ll == foo->last / 8) {
                /* Remove the "LAST" bit */
                foo->dbits[0] = *pp;
                foo->dbits[0] &= ~(1U << (foo->last & 7));
                if (ved_bytes(foo->ecx, act, foo->dbits, 1))
                        return (-1);
                foo->ll++;
                len--;
                pp++;
        }
        if (len > 0) {
                /* Last block */
                dl = foo->stop / 8 - foo->ll;
                if (dl > 0) {
                        dl = vmin(dl, len);
                        if (ved_bytes(foo->ecx, act, pp, dl))
                                return (-1);
                        foo->ll += dl;
                        len -= dl;
                        pp += dl;
                }
        }
        if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) {
                /* Add alignment to byte boundary */
                foo->dbits[1] = *pp;
                foo->ll++;
                len--;
                pp++;
                switch ((int)(foo->stop & 7)) {
                case 1: /*
                         * x000....
                         * 00000000 00000000 11111111 11111111
                         */
                case 3: /*
                         * xxx000..
                         * 00000000 00000000 11111111 11111111
                         */
                case 5: /*
                         * xxxxx000
                         * 00000000 00000000 11111111 11111111
                         */
                        foo->dbits[2] = 0x00; foo->dbits[3] = 0x00;
                        foo->dbits[4] = 0xff; foo->dbits[5] = 0xff;
                        foo->lpad = 5;
                        break;
                case 2: /* xx010000 00000100 00000001 00000000 */
                        foo->dbits[1] |= 0x08;
                        foo->dbits[2] = 0x20;
                        foo->dbits[3] = 0x80;
                        foo->dbits[4] = 0x00;
                        foo->lpad = 4;
                        break;
                case 4: /* xxxx0100 00000001 00000000 */
                        foo->dbits[1] |= 0x20;
                        foo->dbits[2] = 0x80;
                        foo->dbits[3] = 0x00;
                        foo->lpad = 3;
                        break;
                case 6: /* xxxxxx01 00000000 */
                        foo->dbits[1] |= 0x80;
                        foo->dbits[2] = 0x00;
                        foo->lpad = 2;
                        break;
                case 7: /*
                         * xxxxxxx0
                         * 00......
                         * 00000000 00000000 11111111 11111111
                         */
                        foo->dbits[2] = 0x00;
                        foo->dbits[3] = 0x00; foo->dbits[4] = 0x00;
                        foo->dbits[5] = 0xff; foo->dbits[6] = 0xff;
                        foo->lpad = 6;
                        break;
                case 0: /* xxxxxxxx */
                default:
                        WRONG("compiler must be broken");
                }
                if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad))
                        return (-1);
        }
        if (len > 0) {
                /* Recover GZIP tail */
                dl = foo->olen - foo->ll;
                assert(dl >= 0);
                if (dl > len)
                        dl = len;
                if (dl > 0) {
                        assert(dl <= 8);
                        l = foo->ll - (foo->olen - 8);
                        assert(l >= 0);
                        assert(l <= 8);
                        assert(l + dl <= 8);
                        memcpy(foo->tailbuf + l, pp, dl);
                        foo->ll += dl;
                        len -= dl;
                }
        }
        assert(len == 0);
        return (0);
}

static int v_matchproto_(vdp_fini_f)
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv)
{
        uint32_t icrc;
        uint32_t ilen;
        struct ved_foo *foo;

        (void)vdc;
        TAKE_OBJ_NOTNULL(foo, priv, VED_FOO_MAGIC);

        /* XXX
         * This works due to the ESI layering; a VDP pushing bytes from _fini
         * would otherwise have its own _bytes method called.
         *
         * Could be rewritten to use VDP_END.
         */
        (void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0);

        icrc = vle32dec(foo->tailbuf);
        ilen = vle32dec(foo->tailbuf + 4);
        foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen);
        foo->ecx->l_crc += ilen;

        return (0);
}

static const struct vdp ved_gzgz = {
        .name =         "VZZ",
        .init =         ved_gzgz_init,
        .bytes =        ved_gzgz_bytes,
        .fini =         ved_gzgz_fini,
};

/*--------------------------------------------------------------------
 * Straight through without processing.
 */

static int v_matchproto_(vdp_fini_f)
ved_vdp_fini(struct vdp_ctx *vdc, void **priv)
{
        (void)vdc;
        *priv = NULL;
        return (0);
}

static int v_matchproto_(vdp_bytes_f)
ved_vdp_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
        struct ecx *ecx;

        (void)vdc;
        CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
        return (ved_bytes(ecx, act, ptr, len));
}

static const struct vdp ved_ved = {
        .name =         "VED",
        .bytes =        ved_vdp_bytes,
        .fini =         ved_vdp_fini,
};
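
/*
 * Close the delivery context: account the body bytes on this (sub)request
 * and, on error, propagate the failure and the close reason to the topmost
 * ESI request.
 */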

static void
ved_close(struct req *req, int error)
{
        req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);

        if (! error)
                return;
        req->top->topreq->vdc->retval = -1;
        req->top->topreq->doclose = req->doclose;
}

/*--------------------------------------------------------------------*/

static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
ved_deliver(struct req *req, int wantbody)
{
        int i = 0;
        const char *p;
        uint16_t status;
        struct ecx *ecx;
        struct ved_foo foo[1];
        struct vrt_ctx ctx[1];

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
        CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

        CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

        status = req->resp->status % 1000;

        if (FEATURE(FEATURE_ESI_INCLUDE_ONERROR) &&
            status != 200 && status != 204) {
                ved_close(req, ecx->abrt);
                return (VTR_D_DONE);
        }

        if (wantbody == 0) {
                ved_close(req, 0);
                return (VTR_D_DONE);
        }

        if (req->boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0) {
                ved_close(req, 0);
                return (VTR_D_DONE);
        }

        if (http_GetHdr(req->resp, H_Content_Encoding, &p))
                i = http_coding_eq(p, gzip);
        if (i)
                i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);

        INIT_OBJ(ctx, VRT_CTX_MAGIC);
        VCL_Req2Ctx(ctx, req);

        if (ecx->isgzip && i && !req->res_esi) {
                /* A gzipped include which is not ESI processed */

                /* OA_GZIPBITS are not valid until BOS_FINISHED */
                if (req->boc != NULL)
                        ObjWaitState(req->objcore, BOS_FINISHED);

                if (req->objcore->flags & OC_F_FAILED) {
                        /* No way of signalling errors in the middle of
                         * the ESI body. Omit this ESI fragment.
                         * XXX change error argument to 1
                         */
                        ved_close(req, 0);
                        return (VTR_D_DONE);
                }

                INIT_OBJ(foo, VED_FOO_MAGIC);
                foo->ecx = ecx;
                foo->objcore = req->objcore;
                i = VDP_Push(ctx, req->vdc, req->ws, &ved_gzgz, foo);
        } else if (ecx->isgzip && !i) {
                /* Non-Gzip'ed include in gzipped parent */
                i = VDP_Push(ctx, req->vdc, req->ws, &ved_pretend_gz, ecx);
        } else {
                /* Anything else goes straight through */
                i = VDP_Push(ctx, req->vdc, req->ws, &ved_ved, ecx);
        }

        if (i == 0) {
                i = VDP_DeliverObj(req->vdc, req->objcore);
        } else {
                VSLb(req->vsl, SLT_Error, "Failure to push ESI processors");
                req->doclose = SC_OVERLOAD;
        }

        if (i && req->doclose == SC_NULL)
                req->doclose = SC_REM_CLOSE;

        ved_close(req, i && ecx->abrt ? 1 : 0);
        return (VTR_D_DONE);
}