/*-
 * Copyright (c) 2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * VED - Varnish Esi Delivery
 */
|
| 32 |
|
#include "config.h" |
| 33 |
|
|
| 34 |
|
#include "cache_varnishd.h" |
| 35 |
|
|
| 36 |
|
#include <stdlib.h> |
| 37 |
|
|
| 38 |
|
#include "cache_transport.h" |
| 39 |
|
#include "cache_filter.h" |
| 40 |
|
#include "cache_vgz.h" |
| 41 |
|
|
| 42 |
|
#include "vct.h" |
| 43 |
|
#include "vtim.h" |
| 44 |
|
#include "cache_esi.h" |
| 45 |
|
#include "vend.h" |
| 46 |
|
#include "vgz.h" |
| 47 |
|
|
| 48 |
|
static vtr_deliver_f ved_deliver; |
| 49 |
|
static vtr_reembark_f ved_reembark; |
| 50 |
|
|
| 51 |
|
static const uint8_t gzip_hdr[] = { |
| 52 |
|
0x1f, 0x8b, 0x08, |
| 53 |
|
0x00, 0x00, 0x00, 0x00, |
| 54 |
|
0x00, |
| 55 |
|
0x02, 0x03 |
| 56 |
|
}; |
| 57 |
|
|
| 58 |
|
/*
 * Per-delivery ESI context.  Shared between a parent request and the
 * subrequests spawned for its includes (via req->transport_priv).
 */
struct ecx {
	unsigned	magic;
#define ECX_MAGIC	0x0b0f9163
	const uint8_t	*p;	/* cursor into the OA_ESIDATA byte code */
	const uint8_t	*e;	/* end of the byte code */
	int		state;	/* state machine in ved_vdp_esi_bytes() */
	ssize_t		l;	/* remaining length of current VEC chunk */
	int		isgzip;	/* object body is gzip'ed */
	int		woken;	/* reembark signal already delivered */
	int		abrt;	/* abort whole delivery on include error */

	struct req	*preq;	/* parent (including) request */
	struct ecx	*pecx;	/* parent ecx iff parent is gzip'ed, else NULL */
	ssize_t		l_crc;	/* uncompressed length, for gzip trailer */
	uint32_t	crc;	/* running crc32, for gzip trailer */
};
static int v_matchproto_(vtr_minimal_response_f) |
| 76 |
0 |
ved_minimal_response(struct req *req, uint16_t status) |
| 77 |
|
{ |
| 78 |
0 |
(void)req; |
| 79 |
0 |
(void)status; |
| 80 |
0 |
WRONG("esi:includes should not try minimal responses"); |
| 81 |
0 |
} |
| 82 |
|
|
| 83 |
|
static const struct transport VED_transport = { |
| 84 |
|
.magic = TRANSPORT_MAGIC, |
| 85 |
|
.name = "ESI_INCLUDE", |
| 86 |
|
.deliver = ved_deliver, |
| 87 |
|
.reembark = ved_reembark, |
| 88 |
|
.minimal_response = ved_minimal_response, |
| 89 |
|
}; |
| 90 |
|
|
| 91 |
|
/*--------------------------------------------------------------------*/ |
| 92 |
|
|
| 93 |
|
static void v_matchproto_(vtr_reembark_f) |
| 94 |
41 |
ved_reembark(struct worker *wrk, struct req *req) |
| 95 |
|
{ |
| 96 |
|
struct ecx *ecx; |
| 97 |
|
|
| 98 |
41 |
(void)wrk; |
| 99 |
41 |
CHECK_OBJ_NOTNULL(req, REQ_MAGIC); |
| 100 |
41 |
CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC); |
| 101 |
41 |
Lck_Lock(&req->sp->mtx); |
| 102 |
41 |
ecx->woken = 1; |
| 103 |
41 |
PTOK(pthread_cond_signal(&ecx->preq->wrk->cond)); |
| 104 |
41 |
Lck_Unlock(&req->sp->mtx); |
| 105 |
41 |
} |
| 106 |
|
|
| 107 |
|
/*--------------------------------------------------------------------*/ |
| 108 |
|
|
| 109 |
|
static void |
| 110 |
17281 |
ved_include(struct req *preq, const char *src, const char *host, |
| 111 |
|
struct ecx *ecx) |
| 112 |
|
{ |
| 113 |
|
struct worker *wrk; |
| 114 |
|
struct sess *sp; |
| 115 |
|
struct req *req; |
| 116 |
|
enum req_fsm_nxt s; |
| 117 |
|
|
| 118 |
17281 |
CHECK_OBJ_NOTNULL(preq, REQ_MAGIC); |
| 119 |
17281 |
CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC); |
| 120 |
17281 |
sp = preq->sp; |
| 121 |
17281 |
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); |
| 122 |
17281 |
CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC); |
| 123 |
17281 |
wrk = preq->wrk; |
| 124 |
|
|
| 125 |
17281 |
if (preq->esi_level >= cache_param->max_esi_depth) { |
| 126 |
5280 |
VSLb(preq->vsl, SLT_VCL_Error, |
| 127 |
|
"ESI depth limit reached (param max_esi_depth = %u)", |
| 128 |
2640 |
cache_param->max_esi_depth); |
| 129 |
2640 |
if (ecx->abrt) |
| 130 |
40 |
preq->top->topreq->vdc->retval = -1; |
| 131 |
2640 |
return; |
| 132 |
|
} |
| 133 |
|
|
| 134 |
14641 |
req = Req_New(sp, preq); |
| 135 |
14641 |
AN(req); |
| 136 |
14641 |
THR_SetRequest(req); |
| 137 |
14641 |
assert(IS_NO_VXID(req->vsl->wid)); |
| 138 |
14641 |
req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER); |
| 139 |
|
|
| 140 |
14641 |
wrk->stats->esi_req++; |
| 141 |
14641 |
req->esi_level = preq->esi_level + 1; |
| 142 |
|
|
| 143 |
29282 |
VSLb(req->vsl, SLT_Begin, "req %ju esi %u", |
| 144 |
14641 |
(uintmax_t)VXID(preq->vsl->wid), req->esi_level); |
| 145 |
29282 |
VSLb(preq->vsl, SLT_Link, "req %ju esi %u", |
| 146 |
14641 |
(uintmax_t)VXID(req->vsl->wid), req->esi_level); |
| 147 |
|
|
| 148 |
14641 |
VSLb_ts_req(req, "Start", W_TIM_real(wrk)); |
| 149 |
|
|
| 150 |
14641 |
HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod); |
| 151 |
14641 |
HTTP_Dup(req->http, preq->http0); |
| 152 |
|
|
| 153 |
14641 |
http_SetH(req->http, HTTP_HDR_URL, src); |
| 154 |
14641 |
if (host != NULL && *host != '\0') { |
| 155 |
80 |
http_Unset(req->http, H_Host); |
| 156 |
80 |
http_SetHeader(req->http, host); |
| 157 |
80 |
} |
| 158 |
|
|
| 159 |
14641 |
http_ForceField(req->http, HTTP_HDR_METHOD, "GET"); |
| 160 |
14641 |
http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1"); |
| 161 |
|
|
| 162 |
|
/* Don't allow conditionals, we can't use a 304 */ |
| 163 |
14641 |
http_Unset(req->http, H_If_Modified_Since); |
| 164 |
14641 |
http_Unset(req->http, H_If_None_Match); |
| 165 |
|
|
| 166 |
|
/* Don't allow Range */ |
| 167 |
14641 |
http_Unset(req->http, H_Range); |
| 168 |
|
|
| 169 |
|
/* Set Accept-Encoding according to what we want */ |
| 170 |
14641 |
if (ecx->isgzip) |
| 171 |
5160 |
http_ForceHeader(req->http, H_Accept_Encoding, "gzip"); |
| 172 |
|
else |
| 173 |
9479 |
http_Unset(req->http, H_Accept_Encoding); |
| 174 |
|
|
| 175 |
|
/* Client content already taken care of */ |
| 176 |
14639 |
http_Unset(req->http, H_Content_Length); |
| 177 |
14639 |
http_Unset(req->http, H_Transfer_Encoding); |
| 178 |
14639 |
req->req_body_status = BS_NONE; |
| 179 |
|
|
| 180 |
14639 |
AZ(req->vcl); |
| 181 |
14639 |
assert(req->top == preq->top); |
| 182 |
14639 |
if (req->top->vcl0) |
| 183 |
80 |
req->vcl = req->top->vcl0; |
| 184 |
|
else |
| 185 |
14559 |
req->vcl = preq->vcl; |
| 186 |
14639 |
VCL_Ref(req->vcl); |
| 187 |
|
|
| 188 |
14639 |
assert(req->req_step == R_STP_TRANSPORT); |
| 189 |
14639 |
req->t_req = preq->t_req; |
| 190 |
|
|
| 191 |
14639 |
req->transport = &VED_transport; |
| 192 |
14639 |
req->transport_priv = ecx; |
| 193 |
|
|
| 194 |
14639 |
VCL_TaskEnter(req->privs); |
| 195 |
|
|
| 196 |
14680 |
while (1) { |
| 197 |
14680 |
CNT_Embark(wrk, req); |
| 198 |
14680 |
ecx->woken = 0; |
| 199 |
14680 |
s = CNT_Request(req); |
| 200 |
14680 |
if (s == REQ_FSM_DONE) |
| 201 |
14639 |
break; |
| 202 |
41 |
DSL(DBG_WAITINGLIST, req->vsl->wid, |
| 203 |
|
"waiting for ESI (%d)", (int)s); |
| 204 |
41 |
assert(s == REQ_FSM_DISEMBARK); |
| 205 |
41 |
Lck_Lock(&sp->mtx); |
| 206 |
41 |
if (!ecx->woken) |
| 207 |
41 |
(void)Lck_CondWait(&ecx->preq->wrk->cond, &sp->mtx); |
| 208 |
41 |
Lck_Unlock(&sp->mtx); |
| 209 |
41 |
AZ(req->wrk); |
| 210 |
|
} |
| 211 |
|
|
| 212 |
14639 |
VCL_Rel(&req->vcl); |
| 213 |
|
|
| 214 |
14639 |
req->wrk = NULL; |
| 215 |
14639 |
THR_SetRequest(preq); |
| 216 |
|
|
| 217 |
14639 |
Req_Cleanup(sp, wrk, req); |
| 218 |
14639 |
Req_Release(req); |
| 219 |
17279 |
} |
| 220 |
|
|
| 221 |
|
/*--------------------------------------------------------------------*/ |
| 222 |
|
|
| 223 |
|
//#define Debug(fmt, ...) printf(fmt, __VA_ARGS__) |
| 224 |
|
#define Debug(fmt, ...) /**/ |
| 225 |
|
|
| 226 |
|
static ssize_t |
| 227 |
60719 |
ved_decode_len(struct vsl_log *vsl, const uint8_t **pp) |
| 228 |
|
{ |
| 229 |
|
const uint8_t *p; |
| 230 |
|
ssize_t l; |
| 231 |
|
|
| 232 |
60719 |
p = *pp; |
| 233 |
60719 |
switch (*p & 15) { |
| 234 |
|
case 1: |
| 235 |
52999 |
l = p[1]; |
| 236 |
52999 |
p += 2; |
| 237 |
52999 |
break; |
| 238 |
|
case 2: |
| 239 |
7640 |
l = vbe16dec(p + 1); |
| 240 |
7640 |
p += 3; |
| 241 |
7640 |
break; |
| 242 |
|
case 8: |
| 243 |
80 |
l = vbe64dec(p + 1); |
| 244 |
80 |
p += 9; |
| 245 |
80 |
break; |
| 246 |
|
default: |
| 247 |
0 |
VSLb(vsl, SLT_Error, |
| 248 |
0 |
"ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15)); |
| 249 |
0 |
WRONG("ESI-codes: illegal length"); |
| 250 |
0 |
} |
| 251 |
60719 |
*pp = p; |
| 252 |
60719 |
assert(l > 0); |
| 253 |
60719 |
return (l); |
| 254 |
|
} |
| 255 |
|
|
| 256 |
|
/*--------------------------------------------------------------------- |
| 257 |
|
*/ |
| 258 |
|
|
| 259 |
|
static int v_matchproto_(vdp_init_f) |
| 260 |
8320 |
ved_vdp_esi_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) |
| 261 |
|
{ |
| 262 |
|
struct ecx *ecx; |
| 263 |
|
|
| 264 |
8320 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
| 265 |
8320 |
CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC); |
| 266 |
8320 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
| 267 |
8320 |
CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC); |
| 268 |
8320 |
CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC); |
| 269 |
8320 |
AN(vdc->clen); |
| 270 |
8320 |
AN(priv); |
| 271 |
|
|
| 272 |
8320 |
AZ(*priv); |
| 273 |
8320 |
if (vdc->oc == NULL || !ObjHasAttr(vdc->wrk, vdc->oc, OA_ESIDATA)) |
| 274 |
0 |
return (1); |
| 275 |
|
|
| 276 |
8320 |
if (ctx->req == NULL) { |
| 277 |
0 |
VSLb(vdc->vsl, SLT_Error, |
| 278 |
|
"esi can only be used on the client side"); |
| 279 |
0 |
return (1); |
| 280 |
|
} |
| 281 |
|
|
| 282 |
8320 |
ALLOC_OBJ(ecx, ECX_MAGIC); |
| 283 |
8320 |
AN(ecx); |
| 284 |
8320 |
assert(sizeof gzip_hdr == 10); |
| 285 |
8320 |
ecx->preq = ctx->req; |
| 286 |
8320 |
*priv = ecx; |
| 287 |
8320 |
RFC2616_Weaken_Etag(vdc->hp); |
| 288 |
|
|
| 289 |
8320 |
ctx->req->res_esi = 1; |
| 290 |
8320 |
if (*vdc->clen != 0) |
| 291 |
8320 |
*vdc->clen = -1; |
| 292 |
8320 |
if (ctx->req->esi_level > 0) { |
| 293 |
3360 |
assert(ctx->req->transport == &VED_transport); |
| 294 |
3360 |
CAST_OBJ_NOTNULL(ecx->pecx, ctx->req->transport_priv, ECX_MAGIC); |
| 295 |
3360 |
if (!ecx->pecx->isgzip) |
| 296 |
640 |
ecx->pecx = NULL; |
| 297 |
3360 |
} |
| 298 |
|
|
| 299 |
8320 |
return (0); |
| 300 |
8320 |
} |
| 301 |
|
|
| 302 |
|
static int v_matchproto_(vdp_fini_f) |
| 303 |
8320 |
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv) |
| 304 |
|
{ |
| 305 |
|
struct ecx *ecx; |
| 306 |
|
|
| 307 |
8320 |
(void)vdc; |
| 308 |
8320 |
TAKE_OBJ_NOTNULL(ecx, priv, ECX_MAGIC); |
| 309 |
8320 |
FREE_OBJ(ecx); |
| 310 |
8320 |
return (0); |
| 311 |
|
} |
| 312 |
|
|
| 313 |
|
static int v_matchproto_(vdp_bytes_f) |
| 314 |
10560 |
ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
| 315 |
|
const void *ptr, ssize_t len) |
| 316 |
|
{ |
| 317 |
|
const uint8_t *q, *r; |
| 318 |
10560 |
ssize_t l = 0; |
| 319 |
10560 |
uint32_t icrc = 0; |
| 320 |
|
uint8_t tailbuf[8 + 5]; |
| 321 |
|
const uint8_t *pp; |
| 322 |
|
struct ecx *ecx; |
| 323 |
10560 |
int retval = 0; |
| 324 |
|
|
| 325 |
10560 |
if (act == VDP_END) |
| 326 |
8280 |
act = VDP_FLUSH; |
| 327 |
|
|
| 328 |
10560 |
AN(priv); |
| 329 |
10560 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
| 330 |
10560 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
| 331 |
10560 |
pp = ptr; |
| 332 |
|
|
| 333 |
143318 |
while (1) { |
| 334 |
143318 |
switch (ecx->state) { |
| 335 |
|
case 0: |
| 336 |
8280 |
ecx->p = ObjGetAttr(vdc->wrk, ecx->preq->objcore, |
| 337 |
|
OA_ESIDATA, &l); |
| 338 |
8280 |
AN(ecx->p); |
| 339 |
8280 |
assert(l > 0); |
| 340 |
8280 |
ecx->e = ecx->p + l; |
| 341 |
|
|
| 342 |
8280 |
if (*ecx->p == VEC_GZ) { |
| 343 |
4280 |
if (ecx->pecx == NULL) |
| 344 |
1640 |
retval = VDP_bytes(vdc, VDP_NULL, |
| 345 |
|
gzip_hdr, 10); |
| 346 |
4280 |
ecx->l_crc = 0; |
| 347 |
4280 |
ecx->crc = crc32(0L, Z_NULL, 0); |
| 348 |
4280 |
ecx->isgzip = 1; |
| 349 |
4280 |
ecx->p++; |
| 350 |
4280 |
} |
| 351 |
8280 |
ecx->state = 1; |
| 352 |
8280 |
break; |
| 353 |
|
case 1: |
| 354 |
75039 |
if (ecx->p >= ecx->e) { |
| 355 |
8000 |
ecx->state = 2; |
| 356 |
8000 |
break; |
| 357 |
|
} |
| 358 |
67039 |
switch (*ecx->p) { |
| 359 |
|
case VEC_V1: |
| 360 |
|
case VEC_V2: |
| 361 |
|
case VEC_V8: |
| 362 |
23319 |
ecx->l = ved_decode_len(vdc->vsl, &ecx->p); |
| 363 |
23319 |
if (ecx->l < 0) |
| 364 |
0 |
return (-1); |
| 365 |
23319 |
if (ecx->isgzip) { |
| 366 |
11000 |
assert(*ecx->p == VEC_C1 || |
| 367 |
|
*ecx->p == VEC_C2 || |
| 368 |
|
*ecx->p == VEC_C8); |
| 369 |
11000 |
l = ved_decode_len(vdc->vsl, &ecx->p); |
| 370 |
11000 |
if (l < 0) |
| 371 |
0 |
return (-1); |
| 372 |
11000 |
icrc = vbe32dec(ecx->p); |
| 373 |
11000 |
ecx->p += 4; |
| 374 |
11000 |
ecx->crc = crc32_combine( |
| 375 |
11000 |
ecx->crc, icrc, l); |
| 376 |
11000 |
ecx->l_crc += l; |
| 377 |
11000 |
} |
| 378 |
23319 |
ecx->state = 3; |
| 379 |
23319 |
break; |
| 380 |
|
case VEC_S1: |
| 381 |
|
case VEC_S2: |
| 382 |
|
case VEC_S8: |
| 383 |
26400 |
ecx->l = ved_decode_len(vdc->vsl, &ecx->p); |
| 384 |
26400 |
if (ecx->l < 0) |
| 385 |
0 |
return (-1); |
| 386 |
|
Debug("SKIP1(%d)\n", (int)ecx->l); |
| 387 |
26400 |
ecx->state = 4; |
| 388 |
26400 |
break; |
| 389 |
|
case VEC_IA: |
| 390 |
12120 |
ecx->abrt = |
| 391 |
12120 |
FEATURE(FEATURE_ESI_INCLUDE_ONERROR); |
| 392 |
|
/* FALLTHROUGH */ |
| 393 |
|
case VEC_IC: |
| 394 |
17320 |
ecx->p++; |
| 395 |
17320 |
q = (void*)strchr((const char*)ecx->p, '\0'); |
| 396 |
17320 |
AN(q); |
| 397 |
17320 |
q++; |
| 398 |
17320 |
r = (void*)strchr((const char*)q, '\0'); |
| 399 |
17320 |
AN(r); |
| 400 |
17320 |
if (VDP_bytes(vdc, VDP_FLUSH, NULL, 0)) { |
| 401 |
40 |
ecx->p = ecx->e; |
| 402 |
40 |
break; |
| 403 |
|
} |
| 404 |
|
Debug("INCL [%s][%s] BEGIN\n", q, ecx->p); |
| 405 |
34560 |
ved_include(ecx->preq, |
| 406 |
17280 |
(const char*)q, (const char*)ecx->p, ecx); |
| 407 |
|
Debug("INCL [%s][%s] END\n", q, ecx->p); |
| 408 |
17280 |
ecx->p = r + 1; |
| 409 |
17280 |
break; |
| 410 |
|
default: |
| 411 |
0 |
VSLb(vdc->vsl, SLT_Error, |
| 412 |
|
"ESI corruption line %d 0x%02x [%s]\n", |
| 413 |
0 |
__LINE__, *ecx->p, ecx->p); |
| 414 |
0 |
WRONG("ESI-codes: Illegal code"); |
| 415 |
0 |
} |
| 416 |
67039 |
break; |
| 417 |
|
case 2: |
| 418 |
8000 |
ptr = NULL; |
| 419 |
8000 |
len = 0; |
| 420 |
8000 |
if (ecx->isgzip && ecx->pecx == NULL) { |
| 421 |
|
/* |
| 422 |
|
* We are bytealigned here, so simply emit |
| 423 |
|
* a gzip literal block with finish bit set. |
| 424 |
|
*/ |
| 425 |
1640 |
tailbuf[0] = 0x01; |
| 426 |
1640 |
tailbuf[1] = 0x00; |
| 427 |
1640 |
tailbuf[2] = 0x00; |
| 428 |
1640 |
tailbuf[3] = 0xff; |
| 429 |
1640 |
tailbuf[4] = 0xff; |
| 430 |
|
|
| 431 |
|
/* Emit CRC32 */ |
| 432 |
1640 |
vle32enc(tailbuf + 5, ecx->crc); |
| 433 |
|
|
| 434 |
|
/* MOD(2^32) length */ |
| 435 |
1640 |
vle32enc(tailbuf + 9, ecx->l_crc); |
| 436 |
|
|
| 437 |
1640 |
ptr = tailbuf; |
| 438 |
1640 |
len = 13; |
| 439 |
8000 |
} else if (ecx->pecx != NULL) { |
| 440 |
5440 |
ecx->pecx->crc = crc32_combine(ecx->pecx->crc, |
| 441 |
2720 |
ecx->crc, ecx->l_crc); |
| 442 |
2720 |
ecx->pecx->l_crc += ecx->l_crc; |
| 443 |
2720 |
} |
| 444 |
8000 |
retval = VDP_bytes(vdc, VDP_END, ptr, len); |
| 445 |
8000 |
ecx->state = 99; |
| 446 |
8000 |
return (retval); |
| 447 |
|
case 3: |
| 448 |
|
case 4: |
| 449 |
|
/* |
| 450 |
|
* There is no guarantee that the 'l' bytes are all |
| 451 |
|
* in the same storage segment, so loop over storage |
| 452 |
|
* until we have processed them all. |
| 453 |
|
*/ |
| 454 |
51599 |
if (ecx->l <= len) { |
| 455 |
49719 |
if (ecx->state == 3) |
| 456 |
46640 |
retval = VDP_bytes(vdc, act, |
| 457 |
23320 |
pp, ecx->l); |
| 458 |
49719 |
len -= ecx->l; |
| 459 |
49719 |
pp += ecx->l; |
| 460 |
49719 |
ecx->state = 1; |
| 461 |
49719 |
break; |
| 462 |
|
} |
| 463 |
1880 |
if (ecx->state == 3 && len > 0) |
| 464 |
680 |
retval = VDP_bytes(vdc, act, pp, len); |
| 465 |
1880 |
ecx->l -= len; |
| 466 |
1880 |
return (retval); |
| 467 |
|
case 99: |
| 468 |
|
/* |
| 469 |
|
* VEP does not account for the PAD+CRC+LEN |
| 470 |
|
* so we can see up to approx 15 bytes here. |
| 471 |
|
*/ |
| 472 |
400 |
return (retval); |
| 473 |
|
default: |
| 474 |
0 |
WRONG("FOO"); |
| 475 |
0 |
break; |
| 476 |
|
} |
| 477 |
133038 |
if (retval) |
| 478 |
280 |
return (retval); |
| 479 |
|
} |
| 480 |
10560 |
} |
| 481 |
|
|
| 482 |
|
const struct vdp VDP_esi = { |
| 483 |
|
.name = "esi", |
| 484 |
|
.init = ved_vdp_esi_init, |
| 485 |
|
.bytes = ved_vdp_esi_bytes, |
| 486 |
|
.fini = ved_vdp_esi_fini, |
| 487 |
|
}; |
| 488 |
|
|
| 489 |
|
/* |
| 490 |
|
* Account body bytes on req |
| 491 |
|
* Push bytes to preq |
| 492 |
|
*/ |
| 493 |
|
static inline int |
| 494 |
88279 |
ved_bytes(struct ecx *ecx, enum vdp_action act, |
| 495 |
|
const void *ptr, ssize_t len) |
| 496 |
|
{ |
| 497 |
88279 |
if (act == VDP_END) |
| 498 |
14800 |
act = VDP_FLUSH; |
| 499 |
88279 |
return (VDP_bytes(ecx->preq->vdc, act, ptr, len)); |
| 500 |
|
} |
| 501 |
|
|
| 502 |
|
/*---------------------------------------------------------------------
 * If a gzipped ESI object includes an ungzipped object, we need to make
 * it look like a gzipped data stream.  The official way to do so would
 * be to fire up libvgz and gzip it, but we don't, we fake it.
 *
 * First, we cannot know if it is ungzipped on purpose, the admin may
 * know something we don't.
 *
 * What do you mean "BS ?"
 *
 * All right then...
 *
 * The matter of the fact is that we simply will not fire up a gzip in
 * the output path because it costs too much memory and CPU, so we simply
 * wrap the data in very convenient "gzip copy-blocks" and send it down
 * the stream with a bit more overhead.
 */
|
static int v_matchproto_(vdp_fini_f) |
| 521 |
720 |
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv) |
| 522 |
|
{ |
| 523 |
720 |
(void)vdc; |
| 524 |
720 |
*priv = NULL; |
| 525 |
720 |
return (0); |
| 526 |
|
} |
| 527 |
|
|
| 528 |
|
static int v_matchproto_(vdp_bytes_f) |
| 529 |
2000 |
ved_pretend_gzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
| 530 |
|
const void *pv, ssize_t l) |
| 531 |
|
{ |
| 532 |
|
uint8_t buf1[5], buf2[5]; |
| 533 |
|
const uint8_t *p; |
| 534 |
|
uint16_t lx; |
| 535 |
|
struct ecx *ecx; |
| 536 |
|
|
| 537 |
2000 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
| 538 |
2000 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
| 539 |
|
|
| 540 |
2000 |
(void)priv; |
| 541 |
2000 |
if (l == 0) |
| 542 |
400 |
return (ved_bytes(ecx, act, pv, l)); |
| 543 |
|
|
| 544 |
1600 |
p = pv; |
| 545 |
|
|
| 546 |
1600 |
AN (ecx->isgzip); |
| 547 |
1600 |
ecx->crc = crc32(ecx->crc, p, l); |
| 548 |
1600 |
ecx->l_crc += l; |
| 549 |
|
|
| 550 |
|
/* |
| 551 |
|
* buf1 can safely be emitted multiple times for objects longer |
| 552 |
|
* than 64K-1 bytes. |
| 553 |
|
*/ |
| 554 |
1600 |
lx = 65535; |
| 555 |
1600 |
buf1[0] = 0; |
| 556 |
1600 |
vle16enc(buf1 + 1, lx); |
| 557 |
1600 |
vle16enc(buf1 + 3, ~lx); |
| 558 |
|
|
| 559 |
3200 |
while (l > 0) { |
| 560 |
1600 |
if (l >= 65535) { |
| 561 |
0 |
lx = 65535; |
| 562 |
0 |
if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1)) |
| 563 |
0 |
return (-1); |
| 564 |
0 |
} else { |
| 565 |
1600 |
lx = (uint16_t)l; |
| 566 |
1600 |
buf2[0] = 0; |
| 567 |
1600 |
vle16enc(buf2 + 1, lx); |
| 568 |
1600 |
vle16enc(buf2 + 3, ~lx); |
| 569 |
1600 |
if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2)) |
| 570 |
0 |
return (-1); |
| 571 |
|
} |
| 572 |
1600 |
if (ved_bytes(ecx, VDP_NULL, p, lx)) |
| 573 |
0 |
return (-1); |
| 574 |
1600 |
l -= lx; |
| 575 |
1600 |
p += lx; |
| 576 |
|
} |
| 577 |
|
/* buf1 & buf2 are local, so we have to flush */ |
| 578 |
1600 |
return (ved_bytes(ecx, VDP_FLUSH, NULL, 0)); |
| 579 |
2000 |
} |
| 580 |
|
|
| 581 |
|
static const struct vdp ved_pretend_gz = { |
| 582 |
|
.name = "PGZ", |
| 583 |
|
.bytes = ved_pretend_gzip_bytes, |
| 584 |
|
.fini = ved_pretend_gzip_fini, |
| 585 |
|
}; |
| 586 |
|
|
| 587 |
|
/*---------------------------------------------------------------------
 * Include a gzipped object in a gzipped ESI object delivery
 *
 * This is the interesting case: Deliver all the deflate blocks, stripping
 * the "LAST" bit of the last one and padding it, as necessary, to a byte
 * boundary.
 *
 */
|
/*
 * State for stitching one gzip'ed include into the parent gzip stream.
 */
struct ved_foo {
	unsigned		magic;
#define VED_FOO_MAGIC		0x6a5a262d
	struct ecx		*ecx;		/* parent delivery context */
	struct objcore		*objcore;	/* the gzip'ed include */
	uint64_t		start, last, stop, lpad;
		/* start/last/stop: bit offsets from OA_GZIPBITS;
		 * lpad: number of padding bytes staged in dbits[1..] */
	ssize_t			ll;		/* bytes consumed so far */
	uint64_t		olen;		/* total object length */
	uint8_t			dbits[8];	/* scratch for bit surgery */
	uint8_t			tailbuf[8];	/* gzip tail: CRC32 + ISIZE */
};
|
static int v_matchproto_(vdp_init_f) |
| 609 |
1640 |
ved_gzgz_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) |
| 610 |
|
{ |
| 611 |
|
ssize_t l; |
| 612 |
|
const char *p; |
| 613 |
|
struct ved_foo *foo; |
| 614 |
|
|
| 615 |
1640 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
| 616 |
1640 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
| 617 |
1640 |
AN(priv); |
| 618 |
|
|
| 619 |
1640 |
CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC); |
| 620 |
1640 |
CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC); |
| 621 |
|
|
| 622 |
1640 |
memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf); |
| 623 |
|
|
| 624 |
1640 |
AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED)); |
| 625 |
|
|
| 626 |
1640 |
p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l); |
| 627 |
1640 |
AN(p); |
| 628 |
1640 |
assert(l == 32); |
| 629 |
1640 |
foo->start = vbe64dec(p); |
| 630 |
1640 |
foo->last = vbe64dec(p + 8); |
| 631 |
1640 |
foo->stop = vbe64dec(p + 16); |
| 632 |
1640 |
foo->olen = ObjGetLen(vdc->wrk, foo->objcore); |
| 633 |
1640 |
assert(foo->start > 0 && foo->start < foo->olen * 8); |
| 634 |
1640 |
assert(foo->last > 0 && foo->last < foo->olen * 8); |
| 635 |
1640 |
assert(foo->stop > 0 && foo->stop < foo->olen * 8); |
| 636 |
1640 |
assert(foo->last >= foo->start); |
| 637 |
1640 |
assert(foo->last < foo->stop); |
| 638 |
|
|
| 639 |
|
/* The start bit must be byte aligned. */ |
| 640 |
1640 |
AZ(foo->start & 7); |
| 641 |
1640 |
return (0); |
| 642 |
|
} |
| 643 |
|
|
| 644 |
|
/*
 * XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than
 * we need. The VDP_END case would trip our "at most one VDP_END call" assertion
 * in VDP_bytes(), but ved_bytes() covers it.
 *
 * To avoid unnecessary chunks downstream, it would be nice to re-structure the
 * code to identify the last block, send VDP_END/VDP_FLUSH for that one and
 * VDP_NULL for anything before it.
 */
|
static int v_matchproto_(vdp_bytes_f) |
| 655 |
2400 |
ved_gzgz_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
| 656 |
|
const void *ptr, ssize_t len) |
| 657 |
|
{ |
| 658 |
|
struct ved_foo *foo; |
| 659 |
|
const uint8_t *pp; |
| 660 |
|
ssize_t dl; |
| 661 |
|
ssize_t l; |
| 662 |
|
|
| 663 |
2400 |
(void)vdc; |
| 664 |
2400 |
CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC); |
| 665 |
2400 |
pp = ptr; |
| 666 |
2400 |
if (len > 0) { |
| 667 |
|
/* Skip over the GZIP header */ |
| 668 |
2400 |
dl = foo->start / 8 - foo->ll; |
| 669 |
2400 |
if (dl > 0) { |
| 670 |
|
/* Before foo.start, skip */ |
| 671 |
1760 |
if (dl > len) |
| 672 |
120 |
dl = len; |
| 673 |
1760 |
foo->ll += dl; |
| 674 |
1760 |
len -= dl; |
| 675 |
1760 |
pp += dl; |
| 676 |
1760 |
} |
| 677 |
2400 |
} |
| 678 |
2400 |
if (len > 0) { |
| 679 |
|
/* The main body of the object */ |
| 680 |
2280 |
dl = foo->last / 8 - foo->ll; |
| 681 |
2280 |
if (dl > 0) { |
| 682 |
720 |
dl = vmin(dl, len); |
| 683 |
720 |
if (ved_bytes(foo->ecx, act, pp, dl)) |
| 684 |
0 |
return (-1); |
| 685 |
720 |
foo->ll += dl; |
| 686 |
720 |
len -= dl; |
| 687 |
720 |
pp += dl; |
| 688 |
720 |
} |
| 689 |
2280 |
} |
| 690 |
2400 |
if (len > 0 && foo->ll == foo->last / 8) { |
| 691 |
|
/* Remove the "LAST" bit */ |
| 692 |
1640 |
foo->dbits[0] = *pp; |
| 693 |
1640 |
foo->dbits[0] &= ~(1U << (foo->last & 7)); |
| 694 |
1640 |
if (ved_bytes(foo->ecx, act, foo->dbits, 1)) |
| 695 |
0 |
return (-1); |
| 696 |
1640 |
foo->ll++; |
| 697 |
1640 |
len--; |
| 698 |
1640 |
pp++; |
| 699 |
1640 |
} |
| 700 |
2400 |
if (len > 0) { |
| 701 |
|
/* Last block */ |
| 702 |
2200 |
dl = foo->stop / 8 - foo->ll; |
| 703 |
2200 |
if (dl > 0) { |
| 704 |
1080 |
dl = vmin(dl, len); |
| 705 |
1080 |
if (ved_bytes(foo->ecx, act, pp, dl)) |
| 706 |
0 |
return (-1); |
| 707 |
1080 |
foo->ll += dl; |
| 708 |
1080 |
len -= dl; |
| 709 |
1080 |
pp += dl; |
| 710 |
1080 |
} |
| 711 |
2200 |
} |
| 712 |
2400 |
if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) { |
| 713 |
|
/* Add alignment to byte boundary */ |
| 714 |
1320 |
foo->dbits[1] = *pp; |
| 715 |
1320 |
foo->ll++; |
| 716 |
1320 |
len--; |
| 717 |
1320 |
pp++; |
| 718 |
1320 |
switch ((int)(foo->stop & 7)) { |
| 719 |
|
case 1: /* |
| 720 |
|
* x000.... |
| 721 |
|
* 00000000 00000000 11111111 11111111 |
| 722 |
|
*/ |
| 723 |
|
case 3: /* |
| 724 |
|
* xxx000.. |
| 725 |
|
* 00000000 00000000 11111111 11111111 |
| 726 |
|
*/ |
| 727 |
|
case 5: /* |
| 728 |
|
* xxxxx000 |
| 729 |
|
* 00000000 00000000 11111111 11111111 |
| 730 |
|
*/ |
| 731 |
240 |
foo->dbits[2] = 0x00; foo->dbits[3] = 0x00; |
| 732 |
240 |
foo->dbits[4] = 0xff; foo->dbits[5] = 0xff; |
| 733 |
240 |
foo->lpad = 5; |
| 734 |
240 |
break; |
| 735 |
|
case 2: /* xx010000 00000100 00000001 00000000 */ |
| 736 |
760 |
foo->dbits[1] |= 0x08; |
| 737 |
760 |
foo->dbits[2] = 0x20; |
| 738 |
760 |
foo->dbits[3] = 0x80; |
| 739 |
760 |
foo->dbits[4] = 0x00; |
| 740 |
760 |
foo->lpad = 4; |
| 741 |
760 |
break; |
| 742 |
|
case 4: /* xxxx0100 00000001 00000000 */ |
| 743 |
80 |
foo->dbits[1] |= 0x20; |
| 744 |
80 |
foo->dbits[2] = 0x80; |
| 745 |
80 |
foo->dbits[3] = 0x00; |
| 746 |
80 |
foo->lpad = 3; |
| 747 |
80 |
break; |
| 748 |
|
case 6: /* xxxxxx01 00000000 */ |
| 749 |
160 |
foo->dbits[1] |= 0x80; |
| 750 |
160 |
foo->dbits[2] = 0x00; |
| 751 |
160 |
foo->lpad = 2; |
| 752 |
160 |
break; |
| 753 |
|
case 7: /* |
| 754 |
|
* xxxxxxx0 |
| 755 |
|
* 00...... |
| 756 |
|
* 00000000 00000000 11111111 11111111 |
| 757 |
|
*/ |
| 758 |
80 |
foo->dbits[2] = 0x00; |
| 759 |
80 |
foo->dbits[3] = 0x00; foo->dbits[4] = 0x00; |
| 760 |
80 |
foo->dbits[5] = 0xff; foo->dbits[6] = 0xff; |
| 761 |
80 |
foo->lpad = 6; |
| 762 |
80 |
break; |
| 763 |
0 |
case 0: /* xxxxxxxx */ |
| 764 |
|
default: |
| 765 |
0 |
WRONG("compiler must be broken"); |
| 766 |
0 |
} |
| 767 |
1320 |
if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad)) |
| 768 |
0 |
return (-1); |
| 769 |
1320 |
} |
| 770 |
2400 |
if (len > 0) { |
| 771 |
|
/* Recover GZIP tail */ |
| 772 |
1960 |
dl = foo->olen - foo->ll; |
| 773 |
1960 |
assert(dl >= 0); |
| 774 |
1960 |
if (dl > len) |
| 775 |
320 |
dl = len; |
| 776 |
1960 |
if (dl > 0) { |
| 777 |
1960 |
assert(dl <= 8); |
| 778 |
1960 |
l = foo->ll - (foo->olen - 8); |
| 779 |
1960 |
assert(l >= 0); |
| 780 |
1960 |
assert(l <= 8); |
| 781 |
1960 |
assert(l + dl <= 8); |
| 782 |
1960 |
memcpy(foo->tailbuf + l, pp, dl); |
| 783 |
1960 |
foo->ll += dl; |
| 784 |
1960 |
len -= dl; |
| 785 |
1960 |
} |
| 786 |
1960 |
} |
| 787 |
2400 |
assert(len == 0); |
| 788 |
2400 |
return (0); |
| 789 |
2400 |
} |
| 790 |
|
|
| 791 |
|
static int v_matchproto_(vdp_fini_f) |
| 792 |
1640 |
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv) |
| 793 |
|
{ |
| 794 |
|
uint32_t icrc; |
| 795 |
|
uint32_t ilen; |
| 796 |
|
struct ved_foo *foo; |
| 797 |
|
|
| 798 |
1640 |
(void)vdc; |
| 799 |
1640 |
TAKE_OBJ_NOTNULL(foo, priv, VED_FOO_MAGIC); |
| 800 |
|
|
| 801 |
|
/* XXX |
| 802 |
|
* this works due to the esi layering, a VDP pushing bytes from _fini |
| 803 |
|
* will otherwise have its own _bytes method called. |
| 804 |
|
* |
| 805 |
|
* Could rewrite use VDP_END |
| 806 |
|
*/ |
| 807 |
1640 |
(void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0); |
| 808 |
|
|
| 809 |
1640 |
icrc = vle32dec(foo->tailbuf); |
| 810 |
1640 |
ilen = vle32dec(foo->tailbuf + 4); |
| 811 |
1640 |
foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen); |
| 812 |
1640 |
foo->ecx->l_crc += ilen; |
| 813 |
|
|
| 814 |
1640 |
return (0); |
| 815 |
|
} |
| 816 |
|
|
| 817 |
|
static const struct vdp ved_gzgz = { |
| 818 |
|
.name = "VZZ", |
| 819 |
|
.init = ved_gzgz_init, |
| 820 |
|
.bytes = ved_gzgz_bytes, |
| 821 |
|
.fini = ved_gzgz_fini, |
| 822 |
|
}; |
| 823 |
|
|
| 824 |
|
/*-------------------------------------------------------------------- |
| 825 |
|
* Straight through without processing. |
| 826 |
|
*/ |
| 827 |
|
|
| 828 |
|
static int v_matchproto_(vdp_fini_f) |
| 829 |
11520 |
ved_vdp_fini(struct vdp_ctx *vdc, void **priv) |
| 830 |
|
{ |
| 831 |
11520 |
(void)vdc; |
| 832 |
11520 |
*priv = NULL; |
| 833 |
11520 |
return (0); |
| 834 |
|
} |
| 835 |
|
|
| 836 |
|
static int v_matchproto_(vdp_bytes_f) |
| 837 |
76680 |
ved_vdp_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
| 838 |
|
const void *ptr, ssize_t len) |
| 839 |
|
{ |
| 840 |
|
struct ecx *ecx; |
| 841 |
|
|
| 842 |
76680 |
(void)vdc; |
| 843 |
76680 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
| 844 |
76680 |
return (ved_bytes(ecx, act, ptr, len)); |
| 845 |
|
} |
| 846 |
|
|
| 847 |
|
static const struct vdp ved_ved = { |
| 848 |
|
.name = "VED", |
| 849 |
|
.bytes = ved_vdp_bytes, |
| 850 |
|
.fini = ved_vdp_fini, |
| 851 |
|
}; |
| 852 |
|
|
| 853 |
|
static void |
| 854 |
14639 |
ved_close(struct req *req, int error) |
| 855 |
|
{ |
| 856 |
14639 |
req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc); |
| 857 |
|
|
| 858 |
14639 |
if (! error) |
| 859 |
14519 |
return; |
| 860 |
120 |
req->top->topreq->vdc->retval = -1; |
| 861 |
120 |
req->top->topreq->doclose = req->doclose; |
| 862 |
14639 |
} |
| 863 |
|
|
| 864 |
|
/*--------------------------------------------------------------------*/ |
| 865 |
|
|
| 866 |
|
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f) |
| 867 |
14640 |
ved_deliver(struct req *req, int wantbody) |
| 868 |
|
{ |
| 869 |
14640 |
int i = 0; |
| 870 |
|
const char *p; |
| 871 |
|
uint16_t status; |
| 872 |
|
struct ecx *ecx; |
| 873 |
|
struct ved_foo foo[1]; |
| 874 |
|
struct vrt_ctx ctx[1]; |
| 875 |
|
|
| 876 |
14640 |
CHECK_OBJ_NOTNULL(req, REQ_MAGIC); |
| 877 |
14640 |
CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC); |
| 878 |
14640 |
CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC); |
| 879 |
|
|
| 880 |
14640 |
CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC); |
| 881 |
|
|
| 882 |
14640 |
status = req->resp->status % 1000; |
| 883 |
|
|
| 884 |
14800 |
if (FEATURE(FEATURE_ESI_INCLUDE_ONERROR) && |
| 885 |
2760 |
status != 200 && status != 204) { |
| 886 |
160 |
ved_close(req, ecx->abrt); |
| 887 |
160 |
return (VTR_D_DONE); |
| 888 |
|
} |
| 889 |
|
|
| 890 |
14480 |
if (wantbody == 0) { |
| 891 |
520 |
ved_close(req, 0); |
| 892 |
520 |
return (VTR_D_DONE); |
| 893 |
|
} |
| 894 |
|
|
| 895 |
13960 |
if (req->boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0) { |
| 896 |
0 |
ved_close(req, 0); |
| 897 |
0 |
return (VTR_D_DONE); |
| 898 |
|
} |
| 899 |
|
|
| 900 |
13960 |
if (http_GetHdr(req->resp, H_Content_Encoding, &p)) |
| 901 |
4320 |
i = http_coding_eq(p, gzip); |
| 902 |
13960 |
if (i) |
| 903 |
4320 |
i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED); |
| 904 |
|
|
| 905 |
13960 |
INIT_OBJ(ctx, VRT_CTX_MAGIC); |
| 906 |
13960 |
VCL_Req2Ctx(ctx, req); |
| 907 |
|
|
| 908 |
13960 |
if (ecx->isgzip && i && !req->res_esi) { |
| 909 |
|
/* A gzipped include which is not ESI processed */ |
| 910 |
|
|
| 911 |
|
/* OA_GZIPBITS are not valid until BOS_FINISHED */ |
| 912 |
1680 |
if (req->boc != NULL) |
| 913 |
560 |
(void)ObjWaitState(req->objcore, BOS_FINISHED); |
| 914 |
|
|
| 915 |
1680 |
if (req->objcore->flags & OC_F_FAILED) { |
| 916 |
|
/* No way of signalling errors in the middle of |
| 917 |
|
* the ESI body. Omit this ESI fragment. |
| 918 |
|
* XXX change error argument to 1 |
| 919 |
|
*/ |
| 920 |
40 |
ved_close(req, 0); |
| 921 |
40 |
return (VTR_D_DONE); |
| 922 |
|
} |
| 923 |
|
|
| 924 |
1640 |
INIT_OBJ(foo, VED_FOO_MAGIC); |
| 925 |
1640 |
foo->ecx = ecx; |
| 926 |
1640 |
foo->objcore = req->objcore; |
| 927 |
1640 |
i = VDP_Push(ctx, req->vdc, req->ws, &ved_gzgz, foo); |
| 928 |
13920 |
} else if (ecx->isgzip && !i) { |
| 929 |
|
/* Non-Gzip'ed include in gzipped parent */ |
| 930 |
720 |
i = VDP_Push(ctx, req->vdc, req->ws, &ved_pretend_gz, ecx); |
| 931 |
720 |
} else { |
| 932 |
|
/* Anything else goes straight through */ |
| 933 |
11560 |
i = VDP_Push(ctx, req->vdc, req->ws, &ved_ved, ecx); |
| 934 |
|
} |
| 935 |
|
|
| 936 |
13920 |
if (i == 0) { |
| 937 |
13880 |
i = VDP_DeliverObj(req->vdc, req->objcore); |
| 938 |
13880 |
} else { |
| 939 |
40 |
VSLb(req->vsl, SLT_Error, "Failure to push ESI processors"); |
| 940 |
40 |
req->doclose = SC_OVERLOAD; |
| 941 |
|
} |
| 942 |
|
|
| 943 |
13920 |
if (i && req->doclose == SC_NULL) |
| 944 |
120 |
req->doclose = SC_REM_CLOSE; |
| 945 |
|
|
| 946 |
13920 |
ved_close(req, i && ecx->abrt ? 1 : 0); |
| 947 |
13920 |
return (VTR_D_DONE); |
| 948 |
14640 |
} |