r4069 - trunk/varnish-cache/bin/varnishd

phk at projects.linpro.no phk at projects.linpro.no
Mon May 11 10:57:00 CEST 2009


Author: phk
Date: 2009-05-11 10:57:00 +0200 (Mon, 11 May 2009)
New Revision: 4069

Modified:
   trunk/varnish-cache/bin/varnishd/cache.h
   trunk/varnish-cache/bin/varnishd/cache_acceptor.c
   trunk/varnish-cache/bin/varnishd/cache_pool.c
   trunk/varnish-cache/bin/varnishd/cache_session.c
   trunk/varnish-cache/bin/varnishd/cache_ws.c
Log:
Fix an inconsequential oversight in session management, and prevent it from
happening again:

We optimize session allocation, SES_New(), to minimize the amount
of locking the VCA_thread participates in with flip-flop lists of
free sessions: Allocate from one list, free to the other, flip lists
only when necessary.

This scheme only works if nobody but VCA_thread calls SES_New().

Background worker threads need dummy sessions and thus called into
SES_New() as well.

These calls all happen during startup, so they did not in fact mess
up the locking, but they made a mockery of a number of very stern comments.

Add SES_Alloc() for such "other uses" and enforce VCA_thread monopoly
with an assert.


Modified: trunk/varnish-cache/bin/varnishd/cache.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache.h	2009-05-11 08:50:45 UTC (rev 4068)
+++ trunk/varnish-cache/bin/varnishd/cache.h	2009-05-11 08:57:00 UTC (rev 4069)
@@ -434,6 +434,7 @@
 void vca_close_session(struct sess *sp, const char *why);
 void VCA_Prep(struct sess *sp);
 void VCA_Init(void);
+extern pthread_t VCA_thread;
 
 /* cache_backend.c */
 
@@ -591,6 +592,7 @@
 /* cache_session.c [SES] */
 void SES_Init(void);
 struct sess *SES_New(const struct sockaddr *addr, unsigned len);
+struct sess *SES_Alloc(const struct sockaddr *addr, unsigned len);
 void SES_Delete(struct sess *sp);
 void SES_Charge(struct sess *sp);
 void SES_ResetBackendTimeouts(struct sess *sp);

Modified: trunk/varnish-cache/bin/varnishd/cache_acceptor.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_acceptor.c	2009-05-11 08:50:45 UTC (rev 4068)
+++ trunk/varnish-cache/bin/varnishd/cache_acceptor.c	2009-05-11 08:57:00 UTC (rev 4069)
@@ -65,7 +65,7 @@
 
 static struct waiter const *vca_act;
 
-static pthread_t	vca_thread_acct;
+pthread_t		VCA_thread;
 static struct timeval	tv_sndtimeo;
 static struct timeval	tv_rcvtimeo;
 static const struct linger linger = {
@@ -332,7 +332,7 @@
 	if (vca_act->pass == NULL)
 		AZ(pipe(vca_pipes));
 	vca_act->init();
-	AZ(pthread_create(&vca_thread_acct, NULL, vca_acct, NULL));
+	AZ(pthread_create(&VCA_thread, NULL, vca_acct, NULL));
 	VSL(SLT_Debug, 0, "Acceptor is %s", vca_act->name);
 }
 

Modified: trunk/varnish-cache/bin/varnishd/cache_pool.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_pool.c	2009-05-11 08:50:45 UTC (rev 4068)
+++ trunk/varnish-cache/bin/varnishd/cache_pool.c	2009-05-11 08:57:00 UTC (rev 4069)
@@ -701,7 +701,7 @@
 
 	CAST_OBJ_NOTNULL(bt, arg, BGTHREAD_MAGIC);
 	THR_SetName(bt->name);
-	sp = SES_New(NULL, 0);
+	sp = SES_Alloc(NULL, 0);
 	XXXAN(sp);
 	memset(&ww, 0, sizeof ww);
 	memset(&stats, 0, sizeof stats);

Modified: trunk/varnish-cache/bin/varnishd/cache_session.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_session.c	2009-05-11 08:50:45 UTC (rev 4068)
+++ trunk/varnish-cache/bin/varnishd/cache_session.c	2009-05-11 08:57:00 UTC (rev 4069)
@@ -104,35 +104,14 @@
 
 /*--------------------------------------------------------------------*/
 
-struct sess *
-SES_New(const struct sockaddr *addr, unsigned len)
+static struct sess *
+ses_setup(struct sessmem *sm, const struct sockaddr *addr, unsigned len)
 {
-	struct sessmem *sm;
 	struct sess *sp;
 	volatile unsigned u;
 
-	/*
-	 * One of the two queues is unlocked because only one
-	 * thread ever gets here to empty it.
-	 */
-	assert(ses_qp <= 1);
-	sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
 	if (sm == NULL) {
 		/*
-		 * If that queue is empty, flip queues holding the lock
-		 * and try the new unlocked queue.
-		 */
-		Lck_Lock(&ses_mem_mtx);
-		ses_qp = 1 - ses_qp;
-		Lck_Unlock(&ses_mem_mtx);
-		sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
-	}
-	if (sm != NULL) {
-		VTAILQ_REMOVE(&ses_free_mem[ses_qp], sm, list);
-	} else {
-		/*
-		 * If that fails, alloc new one.
-		 *
 		 * It is not necessary to lock mem_workspace, but we
 		 * need to cache it locally, to make sure we get a
 		 * consistent view of it.
@@ -178,6 +157,43 @@
 	return (sp);
 }
 
+/*--------------------------------------------------------------------
+ * Try to recycle an existing session.
+ */
+
+struct sess *
+SES_New(const struct sockaddr *addr, unsigned len)
+{
+	struct sessmem *sm;
+
+	assert(pthread_self() == VCA_thread);
+	assert(ses_qp <= 1);
+	sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
+	if (sm == NULL) {
+		/*
+		 * If that queue is empty, flip queues holding the lock
+		 * and try the new unlocked queue.
+		 */
+		Lck_Lock(&ses_mem_mtx);
+		ses_qp = 1 - ses_qp;
+		Lck_Unlock(&ses_mem_mtx);
+		sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
+	}
+	if (sm != NULL)
+		VTAILQ_REMOVE(&ses_free_mem[ses_qp], sm, list);
+	return (ses_setup(sm, addr, len));
+}
+
+/*--------------------------------------------------------------------*/
+
+struct sess *
+SES_Alloc(const struct sockaddr *addr, unsigned len)
+{
+	return (ses_setup(NULL, addr, len));
+}
+
+/*--------------------------------------------------------------------*/
+
 void
 SES_Delete(struct sess *sp)
 {

Modified: trunk/varnish-cache/bin/varnishd/cache_ws.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_ws.c	2009-05-11 08:50:45 UTC (rev 4068)
+++ trunk/varnish-cache/bin/varnishd/cache_ws.c	2009-05-11 08:57:00 UTC (rev 4069)
@@ -82,6 +82,10 @@
 	WS_Assert(ws);
 }
 
+/*
+ * Reset a WS to start or a given pointer, likely from WS_Snapshot
+ */
+
 void
 WS_Reset(struct ws *ws, char *p)
 {



More information about the varnish-commit mailing list