[master] b358d56 Lock the VSMW layer in the worker process to stabilize v00003.

Poul-Henning Kamp phk@FreeBSD.org
Mon Feb 26 22:20:10 UTC 2018


commit b358d56bec6575ba3bd5407fad869883f1d958d8
Author: Poul-Henning Kamp <phk@FreeBSD.org>
Date:   Mon Feb 26 22:18:55 2018 +0000

    Lock the VSMW layer in the worker process to stabilize v00003.
    
    I don't think it matters anywhere else, but future-proofing is good.
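
In essence, the VSMW layer gets the same pluggable locking the VSC
layer already had: the lock/unlock hooks are plain function pointers
that default to a no-op (instead of NULL), and the worker's VSM_Init()
installs real pthread-mutex wrappers at startup.  A minimal sketch of
the pattern -- the demo_* names are illustrative, not actual Varnish
symbols:

    #include <pthread.h>
    #include <stdlib.h>

    typedef void demo_lock_f(void);

    /* Default to a no-op, so call sites never need a NULL check. */
    static void
    demo_dummy_lock(void)
    {
    }

    static demo_lock_f *demo_lock = demo_dummy_lock;
    static demo_lock_f *demo_unlock = demo_dummy_lock;

    static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    demo_mtx_lock(void)
    {
        if (pthread_mutex_lock(&demo_mtx) != 0)
            abort();
    }

    static void
    demo_mtx_unlock(void)
    {
        if (pthread_mutex_unlock(&demo_mtx) != 0)
            abort();
    }

    /* The manager stays single-threaded and keeps the no-ops;
     * the threaded worker installs the real wrappers, as
     * VSM_Init() does in the diff below. */
    void
    demo_init_threaded(void)
    {
        demo_lock = demo_mtx_lock;
        demo_unlock = demo_mtx_unlock;
    }

    /* Shared code then calls the hooks unconditionally. */
    void
    demo_operation(void)
    {
        demo_lock();
        /* ... touch shared state ... */
        demo_unlock();
    }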

diff --git a/bin/varnishd/cache/cache_shmlog.c b/bin/varnishd/cache/cache_shmlog.c
index c7b5ed8..754c8f6 100644
--- a/bin/varnishd/cache/cache_shmlog.c
+++ b/bin/varnishd/cache/cache_shmlog.c
@@ -45,6 +45,7 @@
 
 /* These cannot be struct lock, which depends on vsm/vsl working */
 static pthread_mutex_t vsl_mtx;
+static pthread_mutex_t vsc_mtx;
 static pthread_mutex_t vsm_mtx;
 
 static struct VSL_head		*vsl_head;
@@ -478,15 +479,27 @@ VSL_End(struct vsl_log *vsl)
 	vsl->wid = 0;
 }
 
-static void
+static void v_matchproto_(vsm_lock_f)
 vsm_vsc_lock(void)
 {
-	AZ(pthread_mutex_lock(&vsm_mtx));
+	AZ(pthread_mutex_lock(&vsc_mtx));
 }
 
-static void
+static void v_matchproto_(vsm_lock_f)
 vsm_vsc_unlock(void)
 {
+	AZ(pthread_mutex_unlock(&vsc_mtx));
+}
+
+static void v_matchproto_(vsm_lock_f)
+vsm_vsmw_lock(void)
+{
+	AZ(pthread_mutex_lock(&vsm_mtx));
+}
+
+static void v_matchproto_(vsm_lock_f)
+vsm_vsmw_unlock(void)
+{
 	AZ(pthread_mutex_unlock(&vsm_mtx));
 }
 
@@ -500,10 +513,13 @@ VSM_Init(void)
 	assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);
 
 	AZ(pthread_mutex_init(&vsl_mtx, NULL));
+	AZ(pthread_mutex_init(&vsc_mtx, NULL));
 	AZ(pthread_mutex_init(&vsm_mtx, NULL));
 
 	vsc_lock = vsm_vsc_lock;
 	vsc_unlock = vsm_vsc_unlock;
+	vsmw_lock = vsm_vsmw_lock;
+	vsmw_unlock = vsm_vsmw_unlock;
 
 	VSC_C_main = VSC_main_New(NULL, NULL, "");
 	AN(VSC_C_main);
diff --git a/bin/varnishd/common/common_vsc.c b/bin/varnishd/common/common_vsc.c
index 23dd5be..7beb903 100644
--- a/bin/varnishd/common/common_vsc.c
+++ b/bin/varnishd/common/common_vsc.c
@@ -71,8 +71,13 @@ struct vsc_seg {
 static VTAILQ_HEAD(,vsc_seg)	vsc_seglist =
     VTAILQ_HEAD_INITIALIZER(vsc_seglist);
 
-vsc_callback_f *vsc_lock;
-vsc_callback_f *vsc_unlock;
+static void v_matchproto_(vsm_lock_f)
+vsc_dummy_lock(void)
+{
+}
+
+vsm_lock_f *vsc_lock = vsc_dummy_lock;
+vsm_lock_f *vsc_unlock = vsc_dummy_lock;
 
 static const size_t vsc_overhead = PRNDUP(sizeof(struct vsc_head));
 
@@ -137,8 +142,7 @@ VRT_VSC_Alloc(struct vsmw_cluster *vc, struct vsc_seg **sg,
 	char buf[1024];
 	uintptr_t jjp;
 
-	if (vsc_lock != NULL)
-		vsc_lock();
+	vsc_lock();
 
 	jjp = (uintptr_t)jp;
 
@@ -178,8 +182,7 @@ VRT_VSC_Alloc(struct vsmw_cluster *vc, struct vsc_seg **sg,
 	VTAILQ_INSERT_TAIL(&vsc_seglist, vsg, list);
 	VWMB();
 	vsg->head->ready = 1;
-	if (vsc_unlock != NULL)
-		vsc_unlock();
+	vsc_unlock();
 	if (sg != NULL)
 		*sg = vsg;
 	return (vsg->ptr);
@@ -190,8 +193,7 @@ VRT_VSC_Destroy(const char *nm, struct vsc_seg *vsg)
 {
 	struct vsc_seg *dvsg;
 
-	if (vsc_lock != NULL)
-		vsc_lock();
+	vsc_lock();
 
 	AN(heritage.proc_vsmw);
 	CHECK_OBJ_NOTNULL(vsg, VSC_SEG_MAGIC);
@@ -209,6 +211,5 @@ VRT_VSC_Destroy(const char *nm, struct vsc_seg *vsg)
 		VTAILQ_REMOVE(&vsc_seglist, dvsg, list);
 		FREE_OBJ(dvsg);
 	}
-	if (vsc_unlock != NULL)
-		vsc_unlock();
+	vsc_unlock();
 }
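
(With vsc_lock and vsc_unlock initialized to vsc_dummy_lock instead of
NULL, the per-call-site NULL guards above become unnecessary: the hooks
are always callable -- a no-op in the manager, real mutex operations
once VSM_Init() has run in the worker.  See the sketch after the commit
message above.)
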
diff --git a/bin/varnishd/common/common_vsmw.c b/bin/varnishd/common/common_vsmw.c
index beb6fbd..8501cc4 100644
--- a/bin/varnishd/common/common_vsmw.c
+++ b/bin/varnishd/common/common_vsmw.c
@@ -55,6 +55,7 @@
 #include "vfil.h"
 #include "vrnd.h"
 
+#include "heritage.h"
 #include "vsmw.h"
 
 #ifndef MAP_HASSEMAPHORE
@@ -65,6 +66,14 @@
 #  define MAP_NOSYNC 0 /* XXX Linux */
 #endif
 
+static void v_matchproto_(vsm_lock_f)
+vsmw_dummy_lock(void)
+{
+}
+
+vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
+vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;
+
 /*--------------------------------------------------------------------*/
 
 struct vsmw_cluster {
@@ -249,6 +258,7 @@ VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
 	struct vsmw_cluster *vc;
 	struct vsmwseg *seg;
 
+	vsmw_lock();
 	vc = vsmw_newcluster(vsmw, len, pfx);
 
 	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
@@ -260,6 +270,7 @@ VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
 	REPLACE(seg->id, "");
 	vsmw_addseg(vsmw, seg);
 
+	vsmw_unlock();
 	return (vc);
 }
 
@@ -268,6 +279,7 @@ VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vcp)
 {
 	struct vsmw_cluster *vc;
 
+	vsmw_lock();
 	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
 	AN(vcp);
 	vc = *vcp;
@@ -283,8 +295,10 @@ VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vcp)
 		 */
 		vsmw_delseg(vsmw, vc->cseg, 1);
 		vc->cseg = NULL;
-		if (vc->refs > 0)
+		if (vc->refs > 0) {
+			vsmw_unlock();
 			return;
+		}
 	}
 	AZ(munmap(vc->ptr, vc->len));
 
@@ -294,6 +308,7 @@ VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vcp)
 		assert (errno == ENOENT);
 	REPLACE(vc->fn, NULL);
 	FREE_OBJ(vc);
+	vsmw_unlock();
 }
 
 /*--------------------------------------------------------------------*/
@@ -305,6 +320,7 @@ VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
 {
 	struct vsmwseg *seg;
 
+	vsmw_lock();
 	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
 	(void)vc;
 
@@ -331,6 +347,7 @@ VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
 
 	vsmw_addseg(vsmw, seg);
 
+	vsmw_unlock();
 	return (seg->ptr);
 }
 
@@ -355,6 +372,7 @@ VSMW_Free(struct vsmw *vsmw, void **pp)
 	struct vsmwseg *seg;
 	void *p;
 
+	vsmw_lock();
 	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
 	AN(pp);
 	p = *pp;
@@ -365,10 +383,14 @@ VSMW_Free(struct vsmw *vsmw, void **pp)
 			break;
 	AN(seg);
 
-	if (!--seg->cluster->refs && seg->cluster->cseg == NULL)
+	if (!--seg->cluster->refs && seg->cluster->cseg == NULL) {
+		vsmw_unlock();
 		VSMW_DestroyCluster(vsmw, &seg->cluster);
+		vsmw_lock();
+	}
 
 	vsmw_delseg(vsmw, seg, 1);
+	vsmw_unlock();
 }
 
 /*--------------------------------------------------------------------*/
@@ -383,6 +405,7 @@ VSMW_New(int vdirfd, int mode, const char *idxname)
 	assert(mode > 0);
 	AN(idxname);
 
+	vsmw_lock();
 	ALLOC_OBJ(vsmw, VSMW_MAGIC);
 	AN(vsmw);
 
@@ -404,6 +427,7 @@ VSMW_New(int vdirfd, int mode, const char *idxname)
 	vsmw_idx_head(vsmw, fd);
 	AZ(close(fd));
 
+	vsmw_unlock();
 	return (vsmw);
 }
 
@@ -413,6 +437,7 @@ VSMW_Destroy(struct vsmw **pp)
 	struct vsmw *vsmw;
 	struct vsmwseg *seg, *s2;
 
+	vsmw_lock();
 	TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
 	VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
 		vsmw_delseg(vsmw, seg, 0);
@@ -422,4 +447,5 @@ VSMW_Destroy(struct vsmw **pp)
 	VSB_destroy(&vsmw->vsb);
 	AZ(close(vsmw->vdirfd));
 	FREE_OBJ(vsmw);
+	vsmw_unlock();
 }
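
One subtlety in VSMW_Free() above: VSMW_DestroyCluster() now takes
vsmw_lock itself, and a default pthread mutex is not recursive, so
VSMW_Free() must drop the lock around that call and retake it after.
A sketch of the hazard being avoided, again with illustrative names
that are not actual Varnish symbols:

    #include <pthread.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for VSMW_DestroyCluster(): takes the lock itself. */
    static void
    inner_locked_op(void)
    {
        (void)pthread_mutex_lock(&mtx);
        /* ... tear down the cluster ... */
        (void)pthread_mutex_unlock(&mtx);
    }

    /* Stand-in for VSMW_Free(): holds the lock when it finds the
     * cluster's last reference gone. */
    static void
    outer_locked_op(void)
    {
        (void)pthread_mutex_lock(&mtx);
        /* Calling inner_locked_op() right here would self-deadlock
         * on the non-recursive mutex, so drop the lock around the
         * call and retake it, as VSMW_Free() does. */
        (void)pthread_mutex_unlock(&mtx);
        inner_locked_op();
        (void)pthread_mutex_lock(&mtx);
        /* ... remove the segment under the lock ... */
        (void)pthread_mutex_unlock(&mtx);
    }
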
diff --git a/bin/varnishd/common/heritage.h b/bin/varnishd/common/heritage.h
index 7903cf6..db1dd37 100644
--- a/bin/varnishd/common/heritage.h
+++ b/bin/varnishd/common/heritage.h
@@ -115,8 +115,10 @@ struct transport;
 void XPORT_Init(void);
 const struct transport *XPORT_Find(const char *name);
 
-/* common/common_vsc.c */
-typedef void vsc_callback_f(void);
-extern vsc_callback_f *vsc_lock;
-extern vsc_callback_f *vsc_unlock;
+/* common/common_vsc.c & common/common_vsmw.c */
+typedef void vsm_lock_f(void);
+extern vsm_lock_f *vsc_lock;
+extern vsm_lock_f *vsc_unlock;
+extern vsm_lock_f *vsmw_lock;
+extern vsm_lock_f *vsmw_unlock;
 

