[master] 17ca56af5 ws: New WS_ReqPipeline()

Dridi Boukelmoune dridi.boukelmoune at gmail.com
Fri Aug 27 15:30:09 UTC 2021


commit 17ca56af55e11858baa154772668b387391342e4
Author: Dridi Boukelmoune <dridi.boukelmoune at gmail.com>
Date:   Mon Jul 12 14:15:47 2021 +0200

    ws: New WS_ReqPipeline()
    
    When a session has data pipelined we perform a dirty dance to move it at
    the beginning of the workspace. The rollbacks used to occur between
HTC_RxInit() calls until they were centralized in the
    latter.
    
    With a dedicated WS_ReqPipeline() operation we can capture the semantics
    of initializing an existing connection for its next task with or without
    data fetched from the previous task.
    
    While conceptually there is still a use-after-free since the pipelined
    data may belong to the same workspace, it is fine if that happens within
    the bounds of an atomic workspace operation.

diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c
index d6a711937..f3bb4da15 100644
--- a/bin/varnishd/cache/cache_session.c
+++ b/bin/varnishd/cache/cache_session.c
@@ -223,30 +223,16 @@ HTC_Status(enum htc_status_e e)
 void
 HTC_RxInit(struct http_conn *htc, struct ws *ws)
 {
-	unsigned r;
-	ssize_t l;
+	unsigned l;
 
 	CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC);
 	htc->ws = ws;
 
-	if (!strcasecmp(ws->id, "req"))
-		WS_Rollback(ws, 0);
-	else
-		AZ(htc->pipeline_b);
-
-	r = WS_ReserveAll(htc->ws);
-	htc->rxbuf_b = htc->rxbuf_e = WS_Reservation(ws);
-	if (htc->pipeline_b != NULL) {
-		AN(htc->pipeline_e);
-		// assert(WS_Inside(ws, htc->pipeline_b, htc->pipeline_e));
-		l = htc->pipeline_e - htc->pipeline_b;
-		assert(l > 0);
-		assert(l <= r);
-		memmove(htc->rxbuf_b, htc->pipeline_b, l);
-		htc->rxbuf_e += l;
-		htc->pipeline_b = NULL;
-		htc->pipeline_e = NULL;
-	}
+	l = WS_ReqPipeline(htc->ws, htc->pipeline_b, htc->pipeline_e);
+	htc->rxbuf_b = WS_Reservation(ws);
+	htc->rxbuf_e = htc->rxbuf_b + l;
+	htc->pipeline_b = NULL;
+	htc->pipeline_e = NULL;
 }
 
 void
diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h
index df9ca6286..24746b0f7 100644
--- a/bin/varnishd/cache/cache_varnishd.h
+++ b/bin/varnishd/cache/cache_varnishd.h
@@ -534,6 +534,7 @@ WS_IsReserved(const struct ws *ws)
 
 void WS_Rollback(struct ws *, uintptr_t);
 void *WS_AtOffset(const struct ws *ws, unsigned off, unsigned len);
+unsigned WS_ReqPipeline(struct ws *, const void *b, const void *e);
 
 static inline unsigned
 WS_ReservationOffset(const struct ws *ws)
diff --git a/bin/varnishd/cache/cache_ws.c b/bin/varnishd/cache/cache_ws.c
index 091179d47..38f439779 100644
--- a/bin/varnishd/cache/cache_ws.c
+++ b/bin/varnishd/cache/cache_ws.c
@@ -174,6 +174,37 @@ WS_Rollback(struct ws *ws, uintptr_t pp)
 	WS_Reset(ws, pp);
 }
 
+/*
+ * Make a reservation and optionally pipeline a memory region that may or
+ * may not originate from the same workspace.
+ */
+
+unsigned
+WS_ReqPipeline(struct ws *ws, const void *b, const void *e)
+{
+	unsigned r, l;
+
+	WS_Assert(ws);
+
+	if (!strcasecmp(ws->id, "req"))
+		WS_Rollback(ws, 0);
+	else
+		AZ(b);
+
+	if (b == NULL) {
+		AZ(e);
+		(void)WS_ReserveAll(ws);
+		return (0);
+	}
+
+	AN(e);
+	r = WS_ReserveAll(ws);
+	l = pdiff(b, e);
+	assert(l <= r);
+	memmove(ws->f, b, l);
+	return (l);
+}
+
 void *
 WS_Alloc(struct ws *ws, unsigned bytes)
 {


More information about the varnish-commit mailing list