[master] 37306d264 cache_deliver_proc: Add VDPIO_Close1()
Nils Goroll
nils.goroll at uplex.de
Fri Jul 4 17:04:05 UTC 2025
commit 37306d2641230a6e49dd044565179469b7d2e2bb
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Mon Mar 3 10:01:14 2025 +0100
cache_deliver_proc: Add VDPIO_Close1()
Some VDPs might reach a point where they are done with their work and would
only pass data on. Avoid this unnecessary overhead by allowing them to
remove themselves from the filter chain.
The added test brings with it some changes to vmod_debug_transport_vai.c,
which have the nice side effect of now also testing VDP_Push() after a
failed VDPIO_Upgrade().
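For illustration only (not part of the commit): a minimal sketch of the
self-removal pattern an io_lease callback could use, assuming the vdpio
filter API visible in the diff below. The name vdpio_once_lease is made up
here; the vdp_hello filter added to vmod_debug_transport_vai.c follows the
same pattern, except that it also injects a prefix before removing itself.

static int v_matchproto_(vdpio_lease_f)
vdpio_once_lease(struct vdp_ctx *vdc, struct vdp_entry *this,
    struct vscarab *scarab)
{
	int r;

	/* pull from the rest of the chain as usual */
	r = vdpio_pull(vdc, this, scarab);

	/* this VDP has nothing left to do: drop it from the chain so
	 * subsequent pulls bypass it entirely */
	(void) VDPIO_Close1(vdc, this);

	return (r);
}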
diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c
index 7b1fd0ab9..037aeda49 100644
--- a/bin/varnishd/cache/cache_deliver_proc.c
+++ b/bin/varnishd/cache/cache_deliver_proc.c
@@ -348,6 +348,23 @@ VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc)
return ((vdc->retval = r));
}
+uint64_t
+VDPIO_Close1(struct vdp_ctx *vdc, struct vdp_entry *vdpe)
+{
+ uint64_t rv;
+
+ CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
+ rv = vdpe->bytes_in;
+ VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
+ (uintmax_t)vdpe->calls, (uintmax_t)rv);
+ if (vdpe->vdp->io_fini != NULL)
+ vdpe->vdp->io_fini(vdc, &vdpe->priv);
+ AZ(vdpe->priv);
+ VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
+ vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
+ return (rv);
+}
+
uint64_t
VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
@@ -359,17 +376,8 @@ VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
- while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) {
- CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
- rv = vdpe->bytes_in;
- VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
- (uintmax_t)vdpe->calls, (uintmax_t)rv);
- if (vdpe->vdp->io_fini != NULL)
- vdpe->vdp->io_fini(vdc, &vdpe->priv);
- AZ(vdpe->priv);
- VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
- vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
- }
+ while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL)
+ rv = VDPIO_Close1(vdc, vdpe);
if (oc != NULL)
HSH_Cancel(vdc->wrk, oc, boc);
diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h
index ce3cdabc2..4e1abe93e 100644
--- a/bin/varnishd/cache/cache_filter.h
+++ b/bin/varnishd/cache/cache_filter.h
@@ -238,6 +238,8 @@ vdpio_pull(struct vdp_ctx *vdc, struct vdp_entry *vdpe, struct vscarab *scarab)
return (ObjVAIlease(vdc->wrk, vdc->vai_hdl, scarab));
}
+uint64_t VDPIO_Close1(struct vdp_ctx *, struct vdp_entry *vdpe);
+
/*
* ============================================================
* VDPIO helpers
diff --git a/bin/varnishd/http1/cache_http1_line.c b/bin/varnishd/http1/cache_http1_line.c
index fca1f6e21..09c9d2eaa 100644
--- a/bin/varnishd/http1/cache_http1_line.c
+++ b/bin/varnishd/http1/cache_http1_line.c
@@ -462,9 +462,7 @@ const struct vdp * const VDP_v1l = &(struct vdp){
.init = v1l_init,
.bytes = v1l_bytes,
-#ifdef LATER
.io_init = v1l_io_init,
-#endif
.io_upgrade = v1l_io_upgrade,
.io_lease = v1l_io_lease,
};
diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc
index 1031c36e6..04f3489b4 100644
--- a/bin/varnishtest/tests/m00061.vtc
+++ b/bin/varnishtest/tests/m00061.vtc
@@ -43,7 +43,8 @@ client c0 -repeat 16 -keepalive {
client c1 -repeat 16 -keepalive {
txreq
rxresp
- expect resp.bodylen == 13107
+ expect resp.bodylen == 13113
+ expect resp.body ~ "^hello "
} -start
#client c2 -repeat 16 -keepalive {
diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c
index da5f8a60b..3ca5496b3 100644
--- a/vmod/vmod_debug_transport_vai.c
+++ b/vmod/vmod_debug_transport_vai.c
@@ -41,6 +41,58 @@
#include "vmod_debug.h"
+#define HELLO "hello "
+
+static int v_matchproto_(vdpio_init_f)
+vdpio_hello_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity)
+{
+
+ (void)ctx;
+ (void)priv;
+
+ CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
+ AN(vdc->clen);
+
+ if (*vdc->clen < 0)
+ return (capacity);
+
+ *vdc->clen += strlen(HELLO);
+ http_Unset(vdc->hp, H_Content_Length);
+ http_PrintfHeader(vdc->hp, "Content-Length: %zd", *vdc->clen);
+ return (capacity);
+}
+
+static int v_matchproto_(vdpio_lease_f)
+vdpio_hello_lease(struct vdp_ctx *vdc, struct vdp_entry *this,
+ struct vscarab *scarab)
+{
+ int r;
+
+ VSCARAB_CHECK_NOTNULL(scarab);
+ if (scarab->used == scarab->capacity)
+ return (0);
+ //lint -e{446} side effects in initializer - uh?
+ VSCARAB_ADD_IOV_NORET(scarab, ((struct iovec)
+ {.iov_base = TRUST_ME(HELLO), .iov_len = strlen(HELLO)}));
+ r = vdpio_pull(vdc, this, scarab);
+
+ (void) VDPIO_Close1(vdc, this);
+
+ // return error from pull
+ if (r < 0)
+ r = 1;
+ else
+ r += 1;
+
+ return (r);
+}
+
+static const struct vdp vdp_hello = {
+ .name = "hello",
+ .io_init = vdpio_hello_init,
+ .io_lease = vdpio_hello_lease
+};
+
static void
dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg)
{
@@ -87,14 +139,15 @@ dbg_vai_deliver(struct req *req, int sendbody)
cache_param->http1_iovs);
if (v1l == NULL) {
- dbg_vai_error(req, &v1l, "Failure to init v1d (workspace_thread overflow)");
+ dbg_vai_error(req, &v1l, "Failure to init v1d "
+ "(workspace_thread overflow)");
return (VTR_D_DONE);
}
// Do not roll back req->ws upon V1L_Close()
V1L_NoRollback(v1l);
- if (sendbody) {
+ while (sendbody) {
if (!http_GetHdr(req->resp, H_Content_Length, NULL)) {
if (req->http->protover == 11) {
http_SetHeader(req->resp,
@@ -105,11 +158,25 @@ dbg_vai_deliver(struct req *req, int sendbody)
}
INIT_OBJ(ctx, VRT_CTX_MAGIC);
VCL_Req2Ctx(ctx, req);
- if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
- dbg_vai_error(req, &v1l, "Failure to push v1d processor");
+ cap = VDPIO_Upgrade(ctx, req->vdc);
+ if (cap <= 0) {
+ if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
+ dbg_vai_error(req, &v1l, "Failure to push v1d");
+ return (VTR_D_DONE);
+ }
+ break;
+ }
+ cap = VDPIO_Push(ctx, req->vdc, req->ws, &vdp_hello, NULL);
+ if (cap < 1) {
+ dbg_vai_error(req, &v1l, "Failure to push hello");
return (VTR_D_DONE);
}
- cap = VDPIO_Upgrade(ctx, req->vdc);
+ cap = VDPIO_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l);
+ if (cap < 1) {
+ dbg_vai_error(req, &v1l, "Failure to push v1d (vdpio)");
+ return (VTR_D_DONE);
+ }
+ break;
}
if (WS_Overflowed(req->ws)) {