[4.1] 11830c5 Add extra locking to protect the pools list and refcounts

Martin Blix Grydeland martin at varnish-software.com
Tue Jun 27 16:07:06 CEST 2017


commit 11830c5d1e5efa3a0d3e2dcb4da56429278e60a3
Author: Martin Blix Grydeland <martin at varnish-software.com>
Date:   Tue Jun 27 15:33:41 2017 +0200

    Add extra locking to protect the pools list and refcounts
    
    A probe that is still running on a worker thread at the time it is
    deleted will delay the release of the refcount it holds on the TCP
    pool. Since that release will not happen on the CLI thread, we need
    locking to protect these data structures.
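
As background, here is a minimal, self-contained sketch (not the Varnish
code itself) of the pattern the diff below applies: one global mutex
guards both the pool list and every pool's refcount, so a reference can
be taken or dropped from any thread. The names (struct pool, pool_ref(),
pool_unref()) are hypothetical, and plain pthreads stand in for Varnish's
Lck_* wrappers.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct pool {
	char		*name;
	unsigned	 refcnt;	/* guarded by pools_mtx */
	struct pool	*next;		/* guarded by pools_mtx */
};

static pthread_mutex_t pools_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct pool *pools = NULL;	/* list head, guarded by pools_mtx */

/* Look up a pool by name and take a reference, creating it if needed. */
static struct pool *
pool_ref(const char *name)
{
	struct pool *p;

	pthread_mutex_lock(&pools_mtx);
	for (p = pools; p != NULL; p = p->next) {
		assert(p->refcnt > 0);
		if (strcmp(p->name, name) == 0) {
			p->refcnt++;
			pthread_mutex_unlock(&pools_mtx);
			return (p);
		}
	}
	pthread_mutex_unlock(&pools_mtx);

	/*
	 * Not found: set up the new pool outside the lock (mirroring the
	 * patch, which assumes pools are only created from the
	 * configuration thread), then take the lock again just to link it
	 * into the shared list.
	 */
	p = calloc(1, sizeof *p);
	assert(p != NULL);
	p->name = strdup(name);
	assert(p->name != NULL);
	p->refcnt = 1;

	pthread_mutex_lock(&pools_mtx);
	p->next = pools;
	pools = p;
	pthread_mutex_unlock(&pools_mtx);
	return (p);
}

/* Drop a reference; free the pool when the last reference goes away. */
static void
pool_unref(struct pool *p)
{
	struct pool **pp;

	pthread_mutex_lock(&pools_mtx);
	assert(p->refcnt > 0);
	if (--p->refcnt > 0) {
		pthread_mutex_unlock(&pools_mtx);
		return;
	}
	/* Unlink while still locked so no other thread can find the pool. */
	for (pp = &pools; *pp != p; pp = &(*pp)->next)
		continue;
	*pp = p->next;
	pthread_mutex_unlock(&pools_mtx);

	free(p->name);
	free(p);
}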

diff --git a/bin/varnishd/cache/cache_backend_tcp.c b/bin/varnishd/cache/cache_backend_tcp.c
index 9f28d22..8e9b660 100644
--- a/bin/varnishd/cache/cache_backend_tcp.c
+++ b/bin/varnishd/cache/cache_backend_tcp.c
@@ -71,6 +71,7 @@ struct tcp_pool {
 
 };
 
+static struct lock		pools_mtx;
 static VTAILQ_HEAD(, tcp_pool)	pools = VTAILQ_HEAD_INITIALIZER(pools);
 
 /*--------------------------------------------------------------------
@@ -127,6 +128,7 @@ VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
 {
 	struct tcp_pool *tp;
 
+	Lck_Lock(&pools_mtx);
 	VTAILQ_FOREACH(tp, &pools, list) {
 		assert(tp->refcnt > 0);
 		if (ip4 == NULL) {
@@ -148,8 +150,10 @@ VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
 				continue;
 		}
 		tp->refcnt++;
+		Lck_Unlock(&pools_mtx);
 		return (tp);
 	}
+	Lck_Unlock(&pools_mtx);
 
 	ALLOC_OBJ(tp, TCP_POOL_MAGIC);
 	AN(tp);
@@ -161,10 +165,14 @@ VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
 	Lck_New(&tp->mtx, lck_backend_tcp);
 	VTAILQ_INIT(&tp->connlist);
 	VTAILQ_INIT(&tp->killlist);
-	VTAILQ_INSERT_HEAD(&pools, tp, list);
 	INIT_OBJ(&tp->waitfor, WAITFOR_MAGIC);
 	tp->waitfor.func = tcp_handle;
 	tp->waitfor.tmo = &cache_param->backend_idle_timeout;
+
+	Lck_Lock(&pools_mtx);
+	VTAILQ_INSERT_HEAD(&pools, tp, list);
+	Lck_Unlock(&pools_mtx);
+
 	return (tp);
 }
 
@@ -182,11 +190,16 @@ VBT_Rel(struct tcp_pool **tpp)
 	tp = *tpp;
 	*tpp = NULL;
 	CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
+	Lck_Lock(&pools_mtx);
 	assert(tp->refcnt > 0);
-	if (--tp->refcnt > 0)
+	if (--tp->refcnt > 0) {
+		Lck_Unlock(&pools_mtx);
 		return;
+	}
 	AZ(tp->n_used);
 	VTAILQ_REMOVE(&pools, tp, list);
+	Lck_Unlock(&pools_mtx);
+
 	free(tp->name);
 	free(tp->ip4);
 	free(tp->ip6);
@@ -408,3 +421,11 @@ VBT_Wait(struct worker *wrk, struct vbc *vbc)
 	vbc->cond = NULL;
 	Lck_Unlock(&tp->mtx);
 }
+
+/*--------------------------------------------------------------------*/
+
+void
+VBT_Init(void)
+{
+	Lck_New(&pools_mtx, lck_backend);
+}
diff --git a/bin/varnishd/cache/cache_main.c b/bin/varnishd/cache/cache_main.c
index 2609d7b..f0ebf83 100644
--- a/bin/varnishd/cache/cache_main.c
+++ b/bin/varnishd/cache/cache_main.c
@@ -233,6 +233,7 @@ child_main(void)
 	HTTP_Init();
 
 	VBO_Init();
+	VBT_Init();
 	VBP_Init();
 	VBE_InitCfg();
 	Pool_Init();
diff --git a/bin/varnishd/cache/cache_priv.h b/bin/varnishd/cache/cache_priv.h
index 9a7ba9f..521f569 100644
--- a/bin/varnishd/cache/cache_priv.h
+++ b/bin/varnishd/cache/cache_priv.h
@@ -54,6 +54,9 @@ void VCA_Shutdown(void);
 void VBE_InitCfg(void);
 void VBE_Poll(void);
 
+/* cache_backend_tcp.c */
+void VBT_Init(void);
+
 /* cache_backend_poll.c */
 void VBP_Init(void);
 



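As a hypothetical usage of the pool_ref()/pool_unref() sketch above, the
scenario described in the commit message looks roughly like this: a worker
thread finishing a deleted probe and the CLI/configuration thread both drop
references to the same pool, so the final decrement and unlink may happen on
either thread and must be serialized by pools_mtx.

static void *
probe_finish(void *arg)
{
	/*
	 * A deleted probe, still running on a worker thread, releases the
	 * reference it holds once it finally completes.
	 */
	pool_unref(arg);
	return (NULL);
}

int
main(void)
{
	pthread_t worker;
	struct pool *cli_ref, *probe_ref;

	cli_ref = pool_ref("b1");	/* reference held by the backend */
	probe_ref = pool_ref("b1");	/* reference held by its probe */
	assert(cli_ref == probe_ref);

	/*
	 * The two releases may now run concurrently; whichever thread drops
	 * the last reference unlinks and frees the pool.
	 */
	if (pthread_create(&worker, NULL, probe_finish, probe_ref) != 0)
		abort();
	pool_unref(cli_ref);
	pthread_join(worker, NULL);
	return (0);
}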