[master] 571a1e3 Wrap VDP delivery context in a struct
Poul-Henning Kamp
phk at FreeBSD.org
Mon Oct 30 11:08:08 UTC 2017
commit 571a1e3375ced741162772813444f2081cbb1325
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Fri Oct 27 07:32:10 2017 +0000
Wrap VDP delivery context in a struct
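For context: the commit folds the three per-request delivery-pipeline fields
(vdpe, vdpe_nxt, vdpe_retval) into one embedded struct vdp_ctx, reached
through a one-element array so call sites get pointer syntax (req->vdp->nxt).
A minimal sketch of the resulting layout, simplified from the diff below and
using the plain BSD <sys/queue.h> macros as a stand-in for Varnish's VTAILQ
wrappers:

#include <sys/queue.h>

struct vdp_entry;			/* one pushed delivery processor */
TAILQ_HEAD(vdp_entry_s, vdp_entry);	/* stand-in for VTAILQ_HEAD() */

struct vdp_ctx {
	struct vdp_entry_s	vdp;	/* pipeline of pushed processors */
	struct vdp_entry	*nxt;	/* next processor VDP_bytes() calls */
	unsigned		retval;	/* latched lowest non-zero error */
};

struct req {
	/* ...many other fields elided... */
	struct vdp_ctx		vdp[1];	/* [1] so req->vdp acts as a pointer */
};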
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index a1d6155..b64e320 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -532,9 +532,11 @@ struct req {
struct objcore *stale_oc;
/* Deliver pipeline */
- struct vdp_entry_s vdpe;
- struct vdp_entry *vdpe_nxt;
- unsigned vdpe_retval;
+ struct vdp_ctx {
+ struct vdp_entry_s vdp;
+ struct vdp_entry *nxt;
+ unsigned retval;
+ } vdp[1];
/* Delivery mode */
unsigned res_mode;
diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c
index 342696f..2def938 100644
--- a/bin/varnishd/cache/cache_deliver_proc.c
+++ b/bin/varnishd/cache/cache_deliver_proc.c
@@ -39,7 +39,7 @@
* This function picks and calls the next delivery processor from the
* list. The return value is the return value of the delivery
* processor. Upon seeing a non-zero return value, that lowest value
- * observed is latched in req->vdpe_retval and all subsequent calls to
+ * observed is latched in ->retval and all subsequent calls to
* VDP_bytes will return that value directly without calling the next
* processor.
*
@@ -53,30 +53,34 @@ VDP_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
{
int retval;
struct vdp_entry *vdpe;
+ struct vdp_ctx *vdc;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ vdc = req->vdp;
assert(act == VDP_NULL || act == VDP_FLUSH);
- if (req->vdpe_retval)
- return (req->vdpe_retval);
- vdpe = req->vdpe_nxt;
+ if (vdc->retval)
+ return (vdc->retval);
+ vdpe = vdc->nxt;
CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
- req->vdpe_nxt = VTAILQ_NEXT(vdpe, list);
+ vdc->nxt = VTAILQ_NEXT(vdpe, list);
assert(act > VDP_NULL || len > 0);
/* Call the present layer, while pointing to the next layer down */
retval = vdpe->vdp->func(req, act, &vdpe->priv, ptr, len);
- if (retval && (req->vdpe_retval == 0 || retval < req->vdpe_retval))
- req->vdpe_retval = retval; /* Latch error value */
- req->vdpe_nxt = vdpe;
- return (req->vdpe_retval);
+ if (retval && (vdc->retval == 0 || retval < vdc->retval))
+ vdc->retval = retval; /* Latch error value */
+ vdc->nxt = vdpe;
+ return (vdc->retval);
}
void
VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
{
struct vdp_entry *vdpe;
+ struct vdp_ctx *vdc;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ vdc = req->vdp;
AN(vdp);
AN(vdp->name);
AN(vdp->func);
@@ -88,10 +92,10 @@ VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
vdpe->vdp = vdp;
vdpe->priv = priv;
if (bottom)
- VTAILQ_INSERT_TAIL(&req->vdpe, vdpe, list);
+ VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
else
- VTAILQ_INSERT_HEAD(&req->vdpe, vdpe, list);
- req->vdpe_nxt = VTAILQ_FIRST(&req->vdpe);
+ VTAILQ_INSERT_HEAD(&vdc->vdp, vdpe, list);
+ vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
AZ(vdpe->vdp->func(req, VDP_INIT, &vdpe->priv, NULL, 0));
}
@@ -100,15 +104,17 @@ void
VDP_close(struct req *req)
{
struct vdp_entry *vdpe;
+ struct vdp_ctx *vdc;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
- while (!VTAILQ_EMPTY(&req->vdpe)) {
- vdpe = VTAILQ_FIRST(&req->vdpe);
+ vdc = req->vdp;
+ while (!VTAILQ_EMPTY(&vdc->vdp)) {
+ vdpe = VTAILQ_FIRST(&vdc->vdp);
CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
- VTAILQ_REMOVE(&req->vdpe, vdpe, list);
+ VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
AZ(vdpe->vdp->func(req, VDP_FINI, &vdpe->priv, NULL, 0));
AZ(vdpe->priv);
- req->vdpe_nxt = VTAILQ_FIRST(&req->vdpe);
+ vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
}
}
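The comment above VDP_bytes() describes the error-latching rule: once any
processor in the pile returns non-zero, the lowest such value is remembered
in ->retval and every later VDP_bytes() call returns it without invoking
further processors.  A toy, self-contained model of just that rule (this is
an illustration only, not the Varnish code):

#include <stdio.h>

struct toy_ctx {
	int	retval;		/* 0 = still delivering, else latched error */
};

static int
toy_bytes(struct toy_ctx *vdc, int processor_result)
{
	if (vdc->retval)		/* already latched: short-circuit */
		return (vdc->retval);
	if (processor_result != 0 &&
	    (vdc->retval == 0 || processor_result < vdc->retval))
		vdc->retval = processor_result;	/* latch lowest non-zero */
	return (vdc->retval);
}

int
main(void)
{
	struct toy_ctx vdc = { 0 };

	printf("%d\n", toy_bytes(&vdc, 0));	/* 0: all fine so far */
	printf("%d\n", toy_bytes(&vdc, -1));	/* -1: error latched */
	printf("%d\n", toy_bytes(&vdc, 0));	/* -1: short-circuits now */
	return (0);
}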
diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c
index 28e99e3..2781a87 100644
--- a/bin/varnishd/cache/cache_gzip.c
+++ b/bin/varnishd/cache/cache_gzip.c
@@ -327,7 +327,7 @@ vdp_gunzip(struct req *req, enum vdp_action act, void **priv,
* If the size is non-zero AND we are the top
* VDP (ie: no ESI), we know what size the output will be.
*/
- if (u != 0 && VTAILQ_FIRST(&req->vdpe)->vdp == &VDP_gunzip)
+ if (u != 0 && VTAILQ_FIRST(&req->vdp->vdp)->vdp == &VDP_gunzip)
req->resp_len = u;
return (0);
@@ -355,7 +355,7 @@ vdp_gunzip(struct req *req, enum vdp_action act, void **priv,
return (-1);
if (vg->m_len == vg->m_sz || vr != VGZ_OK) {
if (VDP_bytes(req, VDP_FLUSH, vg->m_buf, vg->m_len))
- return (req->vdpe_retval);
+ return (req->vdp->retval);
vg->m_len = 0;
VGZ_Obuf(vg, vg->m_buf, vg->m_sz);
}
diff --git a/bin/varnishd/cache/cache_req.c b/bin/varnishd/cache/cache_req.c
index 43581e7..484d6c5 100644
--- a/bin/varnishd/cache/cache_req.c
+++ b/bin/varnishd/cache/cache_req.c
@@ -137,8 +137,8 @@ Req_New(const struct worker *wrk, struct sess *sp)
req->t_prev = NAN;
req->t_req = NAN;
- req->vdpe_nxt = 0;
- VTAILQ_INIT(&req->vdpe);
+ req->vdp->nxt = 0;
+ VTAILQ_INIT(&req->vdp->vdp);
VRTPRIV_init(req->privs);
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index 649b74a..c3b691d 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -790,7 +790,7 @@ cnt_recv(struct worker *wrk, struct req *req)
req->director_hint = VCL_DefaultDirector(req->vcl);
AN(req->director_hint);
- req->vdpe_retval = 0;
+ req->vdp->retval = 0;
req->d_ttl = -1;
req->disable_esi = 0;
req->hash_always_miss = 0;
diff --git a/bin/varnishd/http1/cache_http1_deliver.c b/bin/varnishd/http1/cache_http1_deliver.c
index 7ed54a6..80e398c 100644
--- a/bin/varnishd/http1/cache_http1_deliver.c
+++ b/bin/varnishd/http1/cache_http1_deliver.c
@@ -46,7 +46,7 @@ v1d_bytes(struct req *req, enum vdp_action act, void **priv,
if (act == VDP_INIT || act == VDP_FINI)
return (0);
- AZ(req->vdpe_nxt); /* always at the bottom of the pile */
+ AZ(req->vdp->nxt); /* always at the bottom of the pile */
if (len > 0)
wl = V1L_Write(req->wrk, ptr, len);
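For orientation, a sketch of the shape of a delivery processor in this
pipeline: the same five-argument callback seen in v1d_bytes() and
vdp_gunzip() above, forwarding data down the pile with VDP_bytes().  The
.name/.func fields and the VDP_push() signature are taken from the diff;
the processor itself and its names are hypothetical:

/* Hypothetical pass-through VDP, not part of this commit. */
static int
example_bytes(struct req *req, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	(void)priv;
	if (act == VDP_INIT || act == VDP_FINI)
		return (0);		/* nothing to set up or tear down */
	/* Hand the bytes to the next processor; a non-zero return means
	 * an error is latched in req->vdp->retval and delivery stops. */
	return (VDP_bytes(req, act, ptr, len));
}

static const struct vdp example_vdp = {
	.name = "example",
	.func = example_bytes,
};

/*
 * Pushed during delivery setup, e.g.:
 *	VDP_push(req, &example_vdp, NULL, 0);	(0 = on top of the pile)
 */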