[master] c92ac8bb8 Use VDP for bereq.body

Nils Goroll nils.goroll at uplex.de
Mon Sep 30 14:19:07 UTC 2024


commit c92ac8bb818691fae0ca9d96fd36b8711dc3b743
Author: Nils Goroll <nils.goroll at uplex.de>
Date:   Wed Dec 27 10:36:33 2023 +0100

    Use VDP for bereq.body
    
    This change introduces additional failure points in V1F_SendReq()
    when there is insufficient workspace to push the V1L VDP.

    We adjust r01834.vtc to cover the resulting 503 errors and simplify
    it following the example of r02645.vtc. The two test cases now
    closely resemble each other, but at least for now their purpose is
    important enough that the duplication does not matter.
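
The heart of the change is that V1F_SendReq() now derives the body
length up front instead of tracking a do_chunked flag. A condensed
sketch of that decision, using the names from the diff below (the
helper name and its simplified shape are illustrative only, not code
from the tree):

    /*
     * Sketch of the Content-Length decision in V1F_SendReq() below;
     * cl >= 0: length known, send Content-Length;
     * cl == -1: length unknown, fall back to chunked encoding.
     */
    static intmax_t
    sketch_bereq_body_length(struct worker *wrk, struct busyobj *bo)
    {
        intmax_t cl;

        if (bo->bereq_body != NULL)
            cl = ObjGetLen(wrk, bo->bereq_body); /* cached body */
        else if (bo->req != NULL &&
            !bo->req->req_body_status->length_known)
            cl = -1;            /* streaming body, length unknown */
        else if (bo->req != NULL) {
            cl = http_GetContentLength(bo->req->http);
            if (cl < 0)
                cl = 0;         /* absent/bogus header: no body */
        } else
            cl = 0;             /* no client request: no body */
        return (cl);
    }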

diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c
index 99075c9fe..d2859e930 100644
--- a/bin/varnishd/http1/cache_http1_fetch.c
+++ b/bin/varnishd/http1/cache_http1_fetch.c
@@ -42,25 +42,14 @@
 
 #include "cache_http1.h"
 
-/*--------------------------------------------------------------------
- * Pass the request body to the backend
- */
-
-static int v_matchproto_(objiterate_f)
-vbf_iter_req_body(void *priv, unsigned flush, const void *ptr, ssize_t l)
+static int
+v1f_stackv1l(struct vdp_ctx *vdc, struct busyobj *bo)
 {
-	struct busyobj *bo;
-
-	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
+	struct vrt_ctx ctx[1];
 
-	if (l > 0) {
-		if (DO_DEBUG(DBG_SLOW_BEREQ))
-			VTIM_sleep(1.0);
-		(void)V1L_Write(bo->wrk, ptr, l);
-		if (flush && V1L_Flush(bo->wrk) != SC_NULL)
-			return (-1);
-	}
-	return (0);
+	INIT_OBJ(ctx, VRT_CTX_MAGIC);
+	VCL_Bo2Ctx(ctx, bo);
+	return (VDP_Push(ctx, vdc, ctx->ws, VDP_v1l, NULL));
 }
 
 /*--------------------------------------------------------------------
@@ -80,7 +69,8 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 	ssize_t i;
 	uint64_t bytes, hdrbytes;
 	struct http_conn *htc;
-	int do_chunked = 0;
+	struct vdp_ctx vdc[1];
+	intmax_t cl;
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
@@ -93,13 +83,35 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 	assert(*htc->rfd > 0);
 	hp = bo->bereq;
 
-	if (bo->req != NULL && !bo->req->req_body_status->length_known) {
-		http_PrintfHeader(hp, "Transfer-Encoding: chunked");
-		do_chunked = 1;
+	if (bo->bereq_body != NULL)
+		cl = ObjGetLen(wrk, bo->bereq_body);
+	else if (bo->req != NULL && !bo->req->req_body_status->length_known)
+		cl = -1;
+	else if (bo->req != NULL) {
+		cl = http_GetContentLength(bo->req->http);
+		if (cl < 0)
+			cl = 0;
 	}
+	else
+		cl = 0;
+
+	VDP_Init(vdc, wrk, bo->vsl, NULL, bo, &cl);
+
+	if (v1f_stackv1l(vdc, bo)) {
+		VSLb(bo->vsl, SLT_FetchError, "Failure to push V1L");
+		VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
+		(void) VDP_Close(vdc, NULL, NULL);
+		htc->doclose = SC_OVERLOAD;
+		return (-1);
+	}
+
+	assert(cl >= -1);
+	if (cl < 0)
+		http_PrintfHeader(hp, "Transfer-Encoding: chunked");
 
 	VTCP_blocking(*htc->rfd);	/* XXX: we should timeout instead */
 	/* XXX: need a send_timeout for the backend side */
+	// XXX cache_param->http1_iovs ?
 	V1L_Open(wrk, wrk->aws, htc->rfd, bo->vsl, nan(""), 0);
 	hdrbytes = HTTP1_Write(wrk, hp, HTTP1_Req);
 
@@ -108,16 +120,14 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 
 	if (bo->bereq_body != NULL) {
 		AZ(bo->req);
-		AZ(do_chunked);
+		assert(cl >= 0);
 		(void)ObjIterate(bo->wrk, bo->bereq_body,
-		    bo, vbf_iter_req_body, 0);
+		    vdc, VDP_ObjIterate, 0);
 	} else if (bo->req != NULL &&
 	    bo->req->req_body_status != BS_NONE) {
-		if (DO_DEBUG(DBG_FLUSH_HEAD))
-			(void)V1L_Flush(wrk);
-		if (do_chunked)
+		if (cl < 0)
 			V1L_Chunked(wrk);
-		i = VRB_Iterate(wrk, bo->vsl, bo->req, vbf_iter_req_body, bo);
+		i = VRB_Iterate(wrk, bo->vsl, bo->req, VDP_ObjIterate, vdc);
 
 		if (bo->req->req_body_status != BS_CACHED)
 			bo->no_retry = "req.body not cached";
@@ -138,7 +148,7 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 			    errno, VAS_errtxt(errno));
 			bo->req->doclose = SC_RX_BODY;
 		}
-		if (do_chunked)
+		if (cl < 0)
 			V1L_EndChunk(wrk);
 	}
 
@@ -146,12 +156,8 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 	CHECK_OBJ_NOTNULL(sc, STREAM_CLOSE_MAGIC);
 
 	/* Bytes accounting */
-	if (bytes < hdrbytes)
-		*ctr_hdrbytes += bytes;
-	else {
-		*ctr_hdrbytes += hdrbytes;
-		*ctr_bodybytes += bytes - hdrbytes;
-	}
+	*ctr_hdrbytes += hdrbytes;
+	*ctr_bodybytes += VDP_Close(vdc, NULL, NULL);
 
 	if (sc == SC_NULL && i < 0)
 		sc = SC_TX_ERROR;
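
With the bespoke vbf_iter_req_body() gone, both body paths above feed
segments through the generic VDP_ObjIterate bridge. Roughly, such an
objiterate_f-to-VDP adapter maps the iterator's flush flag onto a
vdp_action and hands each segment to VDP_bytes(); the sketch below is
an assumption for illustration, not the actual bridge in the tree:

    /* Hypothetical objiterate_f -> VDP bridge in the spirit of
     * VDP_ObjIterate(): priv is the vdp_ctx set up by VDP_Init(). */
    static int v_matchproto_(objiterate_f)
    sketch_vdp_objiterate(void *priv, unsigned flush, const void *ptr,
        ssize_t len)
    {
        enum vdp_action act;

        if (flush == 0)
            act = VDP_NULL;     /* segment may be buffered */
        else if (flush & OBJ_ITER_END)
            act = VDP_END;      /* last segment of the body */
        else
            act = VDP_FLUSH;    /* push buffered bytes to the wire */
        return (VDP_bytes(priv, act, ptr, len));
    }
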
diff --git a/bin/varnishtest/tests/r01834.vtc b/bin/varnishtest/tests/r01834.vtc
index 33437a9b2..b1f689e50 100644
--- a/bin/varnishtest/tests/r01834.vtc
+++ b/bin/varnishtest/tests/r01834.vtc
@@ -3,9 +3,13 @@ varnishtest "#1834 - Buffer overflow in backend workspace"
 # This test case checks fetch side for buffer overflow when there is little
 # workspace left. If failing it would be because we tripped the canary
 # at the end of the workspace.
+#
+# Status codes are not so important here; we just should not panic.
+#
+# XXX almost the same as r02645.vtc
 
-
-server s1 -repeat 64 {
+server s1 -repeat 40 {
+	non_fatal
 	rxreq
 	txresp
 } -start
@@ -19,95 +23,30 @@ varnish v1 -vcl+backend {
 	}
 
 	sub vcl_backend_fetch {
-		vtc.workspace_alloc(backend, -1 * std.integer(bereq.http.WS, 256));
+		# this relies on deterministic xids.
+		# first bereq is xid == 1002
+		vtc.workspace_alloc(backend, -320 +
+		    8 * ((bereq.xid - 1002) / 3));
 	}
 } -start
 
-client c1 {
-	# Start with enough workspace to receive a good result
-	txreq -hdr "WS: 320"
+# Start with enough workspace to receive some good results
+client c1 -repeat 5 {
+	txreq -hdr "Connection: close"
 	rxresp
 	expect resp.status == 200
+} -run
 
-	# Continue with decreasing workspaces by decrements of 8 (sizeof void*)
-	txreq -hdr "WS: 312"
-	rxresp
-	txreq -hdr "WS: 304"
-	rxresp
-	txreq -hdr "WS: 296"
-	rxresp
-	txreq -hdr "WS: 288"
-	rxresp
-	txreq -hdr "WS: 280"
-	rxresp
-	txreq -hdr "WS: 272"
-	rxresp
-	txreq -hdr "WS: 264"
-	rxresp
-	txreq -hdr "WS: 256"
-	rxresp
-	txreq -hdr "WS: 248"
-	rxresp
-	txreq -hdr "WS: 240"
-	rxresp
-	txreq -hdr "WS: 232"
-	rxresp
-	txreq -hdr "WS: 224"
-	rxresp
-	txreq -hdr "WS: 216"
-	rxresp
-	txreq -hdr "WS: 208"
-	rxresp
-	txreq -hdr "WS: 200"
-	rxresp
-	txreq -hdr "WS: 192"
-	rxresp
-	txreq -hdr "WS: 184"
-	rxresp
-	txreq -hdr "WS: 176"
-	rxresp
-	txreq -hdr "WS: 168"
-	rxresp
-	txreq -hdr "WS: 160"
+# tolerance region where we do not care about the return code
+# failures should start at around xid 1042 (repeat 10)
+client c1 -repeat 15 {
+	txreq -hdr "Connection: close"
 	rxresp
-	txreq -hdr "WS: 152"
-	rxresp
-	txreq -hdr "WS: 144"
-	rxresp
-	txreq -hdr "WS: 136"
-	rxresp
-	txreq -hdr "WS: 128"
-	rxresp
-	txreq -hdr "WS: 120"
-	rxresp
-	txreq -hdr "WS: 112"
-	rxresp
-	txreq -hdr "WS: 104"
-	rxresp
-	txreq -hdr "WS: 096"
-	rxresp
-	txreq -hdr "WS: 088"
-	rxresp
-	txreq -hdr "WS: 080"
-	rxresp
-	txreq -hdr "WS: 072"
-	rxresp
-	txreq -hdr "WS: 064"
-	rxresp
-	txreq -hdr "WS: 056"
-	rxresp
-	txreq -hdr "WS: 048"
-	rxresp
-	txreq -hdr "WS: 040"
-	rxresp
-	txreq -hdr "WS: 032"
-	rxresp
-	txreq -hdr "WS: 024"
-	rxresp
-	txreq -hdr "WS: 016"
-	rxresp
-	txreq -hdr "WS: 008"
-	rxresp
-	txreq -hdr "WS: 000"
+} -run
+
+# The final requests must all fail.
+client c1 -repeat 20 {
+	txreq -hdr "Connection: close"
 	rxresp
+	expect resp.status == 503
 } -run
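
The sweep that used to be spelled out as forty explicit WS headers is
now computed from the xid: with deterministic xids consecutive bereqs
are 3 apart, and vtc.workspace_alloc() with a negative size allocates
all but that many bytes, so the n-th backend request leaves 320 - 8*n
bytes of workspace free, recreating the old 8-byte decrements. A
standalone illustration of the arithmetic:

    /* Prints the per-request workspace headroom of the rewritten
     * r01834.vtc: bereq xids start at 1002 and step by 3, and the
     * allocation -320 + 8*n leaves 320 - 8*n bytes free, sweeping
     * from 320 down to 8 over the 40 requests. */
    #include <stdio.h>

    int
    main(void)
    {
        for (int n = 0; n < 40; n++)
            printf("bereq xid %d: alloc %d -> %d bytes free\n",
                1002 + 3 * n, -320 + 8 * n, 320 - 8 * n);
        return (0);
    }
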
diff --git a/bin/varnishtest/tests/r02645.vtc b/bin/varnishtest/tests/r02645.vtc
index 982c50a96..2f6a86d7d 100644
--- a/bin/varnishtest/tests/r02645.vtc
+++ b/bin/varnishtest/tests/r02645.vtc
@@ -1,6 +1,9 @@
 varnishtest "sweep through tight backend workspace conditions"
 
+# XXX almost the same as r01834.vtc - retire?
+
 server s1 -repeat 100 {
+	non_fatal
 	rxreq
 	send "HTTP/1.1 200 OK\r\nTransfer-encoding: chunked\r\n\r\n00000004\r\n1234\r\n00000000\r\n\r\n"
 } -start

