[master] 1e82f50 Complement VDP error handling
Nils Goroll
nils.goroll at uplex.de
Tue Apr 24 06:22:21 UTC 2018
commit 1e82f502a027b89caa94be88496b20aaf3970c8d
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Mon Apr 23 11:25:57 2018 +0200
Complement VDP error handling
* remember push errors in the context
* also fail additional pushes once we saw an error
Try to fail requests as early as possible. Otherwise just calling VDP
functions after an error is a noop.
Fixes #2618
diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c
index 20439b1..e3cdcee 100644
--- a/bin/varnishd/cache/cache_deliver_proc.c
+++ b/bin/varnishd/cache/cache_deliver_proc.c
@@ -73,7 +73,7 @@ VDP_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
return (vdc->retval);
}
-void
+int
VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
{
struct vdp_entry *vdpe;
@@ -85,12 +85,18 @@ VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
AN(vdp->name);
AN(vdp->func);
+ if (vdc->retval)
+ return (vdc->retval);
+
if (DO_DEBUG(DBG_PROCESSORS))
VSLb(req->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);
vdpe = WS_Alloc(req->ws, sizeof *vdpe);
- if (vdpe == NULL)
- return;
+ if (vdpe == NULL) {
+ AZ(vdc->retval);
+ vdc->retval = -1;
+ return (vdc->retval);
+ }
INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
vdpe->vdp = vdp;
vdpe->priv = priv;
@@ -100,7 +106,9 @@ VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
VTAILQ_INSERT_HEAD(&vdc->vdp, vdpe, list);
vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
- AZ(vdpe->vdp->func(req, VDP_INIT, &vdpe->priv, NULL, 0));
+ AZ(vdc->retval);
+ vdc->retval = vdpe->vdp->func(req, VDP_INIT, &vdpe->priv, NULL, 0);
+ return (vdc->retval);
}
void
@@ -113,10 +121,15 @@ VDP_close(struct req *req)
vdc = req->vdc;
while (!VTAILQ_EMPTY(&vdc->vdp)) {
vdpe = VTAILQ_FIRST(&vdc->vdp);
- CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
- VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
- AZ(vdpe->vdp->func(req, VDP_FINI, &vdpe->priv, NULL, 0));
- AZ(vdpe->priv);
+ if (vdc->retval >= 0)
+ AN(vdpe);
+ if (vdpe != NULL) {
+ CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
+ VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
+ AZ(vdpe->vdp->func(req, VDP_FINI, &vdpe->priv,
+ NULL, 0));
+ AZ(vdpe->priv);
+ }
vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
}
}
diff --git a/bin/varnishd/cache/cache_esi_deliver.c b/bin/varnishd/cache/cache_esi_deliver.c
index 3af6985..1829804 100644
--- a/bin/varnishd/cache/cache_esi_deliver.c
+++ b/bin/varnishd/cache/cache_esi_deliver.c
@@ -815,9 +815,9 @@ ved_deliver(struct req *req, struct boc *boc, int wantbody)
ved_stripgzip(req, boc);
} else {
if (ecx->isgzip && !i)
- VDP_push(req, &ved_vdp_pgz, ecx, 1);
+ (void)VDP_push(req, &ved_vdp_pgz, ecx, 1);
else
- VDP_push(req, &ved_ved, ecx->preq, 1);
+ (void)VDP_push(req, &ved_ved, ecx->preq, 1);
(void)VDP_DeliverObj(req);
(void)VDP_bytes(req, VDP_FLUSH, NULL, 0);
}
diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h
index 3192238..b66afb0 100644
--- a/bin/varnishd/cache/cache_filter.h
+++ b/bin/varnishd/cache/cache_filter.h
@@ -123,8 +123,8 @@ struct vdp_ctx {
#define VDP_CTX_MAGIC 0xee501df7
struct vdp_entry_s vdp;
struct vdp_entry *nxt;
- unsigned retval;
+ int retval;
};
int VDP_bytes(struct req *, enum vdp_action act, const void *ptr, ssize_t len);
-void VDP_push(struct req *, const struct vdp *, void *priv, int bottom);
+int VDP_push(struct req *, const struct vdp *, void *priv, int bottom);
diff --git a/bin/varnishd/cache/cache_range.c b/bin/varnishd/cache/cache_range.c
index acd7034..373d1f6 100644
--- a/bin/varnishd/cache/cache_range.c
+++ b/bin/varnishd/cache/cache_range.c
@@ -175,7 +175,8 @@ vrg_dorange(struct req *req, const char *r)
vrg_priv->range_off = 0;
vrg_priv->range_low = low;
vrg_priv->range_high = high + 1;
- VDP_push(req, &vrg_vdp, vrg_priv, 1);
+ if (VDP_push(req, &vrg_vdp, vrg_priv, 1))
+ return ("WS too small");
http_PutResponse(req->resp, "HTTP/1.1", 206, NULL);
return (NULL);
}
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index e327c0d..00a0249 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -342,7 +342,7 @@ cnt_transmit(struct worker *wrk, struct req *req)
struct boc *boc;
const char *r;
uint16_t status;
- int sendbody;
+ int err, sendbody;
intmax_t clval;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
@@ -375,15 +375,18 @@ cnt_transmit(struct worker *wrk, struct req *req)
} else
sendbody = 1;
+ err = 0;
if (sendbody >= 0) {
if (!req->disable_esi && req->resp_len != 0 &&
- ObjHasAttr(wrk, req->objcore, OA_ESIDATA))
- VDP_push(req, &VDP_esi, NULL, 0);
+ ObjHasAttr(wrk, req->objcore, OA_ESIDATA) &&
+ VDP_push(req, &VDP_esi, NULL, 0) < 0)
+ err++;
if (cache_param->http_gzip_support &&
ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
- !RFC2616_Req_Gzip(req->http))
- VDP_push(req, &VDP_gunzip, NULL, 1);
+ !RFC2616_Req_Gzip(req->http) &&
+ VDP_push(req, &VDP_gunzip, NULL, 1) < 0)
+ err++;
if (cache_param->http_range_support &&
http_IsStatus(req->resp, 200)) {
@@ -405,7 +408,12 @@ cnt_transmit(struct worker *wrk, struct req *req)
"Content-Length: %jd", req->resp_len);
}
- req->transport->deliver(req, boc, sendbody);
+ if (err == 0)
+ req->transport->deliver(req, boc, sendbody);
+ else {
+ VSLb(req->vsl, SLT_Error, "Failure to push processors");
+ req->doclose = SC_OVERLOAD;
+ }
VSLb_ts_req(req, "Resp", W_TIM_real(wrk));
diff --git a/bin/varnishd/http1/cache_http1_deliver.c b/bin/varnishd/http1/cache_http1_deliver.c
index c7a6226..fda38dd 100644
--- a/bin/varnishd/http1/cache_http1_deliver.c
+++ b/bin/varnishd/http1/cache_http1_deliver.c
@@ -128,8 +128,11 @@ V1D_Deliver(struct req *req, struct boc *boc, int sendbody)
if (req->resp_len == 0)
sendbody = 0;
- if (sendbody)
- VDP_push(req, &v1d_vdp, NULL, 1);
+ if (sendbody && VDP_push(req, &v1d_vdp, NULL, 1)) {
+ v1d_error(req, "workspace_thread overflow");
+ AZ(req->wrk->v1l);
+ return;
+ }
AZ(req->wrk->v1l);
V1L_Open(req->wrk, req->wrk->aws,
diff --git a/bin/varnishd/http2/cache_http2_deliver.c b/bin/varnishd/http2/cache_http2_deliver.c
index a9f43e9..a0ff969 100644
--- a/bin/varnishd/http2/cache_http2_deliver.c
+++ b/bin/varnishd/http2/cache_http2_deliver.c
@@ -265,10 +265,13 @@ h2_deliver(struct req *req, struct boc *boc, int sendbody)
WS_Release(req->ws, 0);
- if (sendbody) {
- VDP_push(req, &h2_vdp, NULL, 1);
+ /* XXX someone into H2 please add appropriate error handling */
+ while (sendbody) {
+ err = VDP_push(req, &h2_vdp, NULL, 1);
+ if (err)
+ break;
err = VDP_DeliverObj(req);
- /*XXX*/(void)err;
+ break;
}
AZ(req->wrk->v1l);
diff --git a/bin/varnishtest/tests/r02618.vtc b/bin/varnishtest/tests/r02618.vtc
new file mode 100644
index 0000000..efe1a23
--- /dev/null
+++ b/bin/varnishtest/tests/r02618.vtc
@@ -0,0 +1,25 @@
+varnishtest "sweep through tight client workspace conditions in deliver"
+
+server s1 {
+ rxreq
+	txresp -hdr "Cache-Control: max-age=3600" -bodylen 1024
+} -start
+
+varnish v1 -vcl+backend {
+ import vtc;
+ import std;
+ sub vcl_recv {
+ return (hash);
+ }
+ sub vcl_deliver {
+ vtc.workspace_alloc(client, -4 *
+ (std.integer(req.xid, 1001) - 1001) / 2);
+ }
+} -start
+
+client c1 -repeat 100 {
+ txreq -url "/"
+ # some responses will fail (503), some won't. All we care
+ # about here is the fact that we don't panic
+ rxresp
+} -run
More information about the varnish-commit
mailing list