r1570 - trunk/varnish-cache/bin/varnishd

des at projects.linpro.no
Mon Jun 25 19:04:10 CEST 2007


Author: des
Date: 2007-06-25 19:04:09 +0200 (Mon, 25 Jun 2007)
New Revision: 1570

Added:
   trunk/varnish-cache/bin/varnishd/cache_lru.c
Modified:
   trunk/varnish-cache/bin/varnishd/Makefile.am
   trunk/varnish-cache/bin/varnishd/cache.h
   trunk/varnish-cache/bin/varnishd/cache_expire.c
   trunk/varnish-cache/bin/varnishd/cache_hash.c
Log:
First step in implementing early retirement of objects when the cache fills
up: implement a "sloppy" LRU list.  An object is placed on the list (or moved
to the head of the list if it's already on it and hasn't moved recently) by
calling LRU_Enter(), and removed by calling LRU_Remove().  LRU_DiscardSpace()
will iterate through the LRU list, starting at the back, and retire objects
(by adding them to the deathrow list) until the combined length of the
retired objects reaches the specified quota.  Similarly, LRU_DiscardTime()
will retire objects that haven't moved since a specified cutoff date.  In
both cases, vcl_discard() will be given a chance to inspect the object and
veto its retirement.
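
For illustration, a minimal sketch of how the two discard functions are
meant to be driven, using the signatures declared in cache.h below; the
quota and cutoff values here are invented for the example:

	/* Retire least-recently-used objects until roughly 10 MB worth of
	 * object data has been moved to deathrow (or the list runs out);
	 * vcl_discard() may veto individual objects. */
	LRU_DiscardSpace(sp, (uint64_t)10 * 1024 * 1024);

	/* Retire any object that hasn't moved on the LRU list during the
	 * past hour. */
	LRU_DiscardTime(sp, time(NULL) - 3600);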

Currently, LRU_Enter() and LRU_Remove() are called from HSH_Lookup() and
HSH_Deref() respectively.  There may be better alternatives.

Neither LRU_DiscardSpace() nor LRU_DiscardTime() is currently called from
anywhere.  There are a number of issues to consider: for instance, even if
LRU_DiscardSpace() is called when a high-water mark is reached, the cache
might still fill up before it finishes and before the hangman has had a
chance to process the deathrow list.
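
One conceivable caller is a watchdog thread along the following lines; the
stv_used() and stv_size() storage accounting helpers and the 90%/75%
thresholds are invented for this sketch and are not provided by this
commit:

	static void *
	lru_watchdog(void *priv)
	{
		struct sess *sp = priv;
		uint64_t used, size;

		for (;;) {
			used = stv_used();	/* hypothetical accessor */
			size = stv_size();	/* hypothetical accessor */
			/* Above 90% utilization, try to discard down to
			 * 75%.  Per the caveat above, the cache may still
			 * fill up before the hangman drains deathrow. */
			if (used > size / 100 * 90)
				LRU_DiscardSpace(sp, used - size / 100 * 75);
			sleep(1);
		}
	}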


Modified: trunk/varnish-cache/bin/varnishd/Makefile.am
===================================================================
--- trunk/varnish-cache/bin/varnishd/Makefile.am	2007-06-25 16:25:29 UTC (rev 1569)
+++ trunk/varnish-cache/bin/varnishd/Makefile.am	2007-06-25 17:04:09 UTC (rev 1570)
@@ -19,6 +19,7 @@
 	cache_fetch.c \
 	cache_hash.c \
 	cache_http.c \
+	cache_lru.c \
 	cache_main.c \
 	cache_pool.c \
 	cache_pipe.c \

Modified: trunk/varnish-cache/bin/varnishd/cache.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache.h	2007-06-25 16:25:29 UTC (rev 1569)
+++ trunk/varnish-cache/bin/varnishd/cache.h	2007-06-25 17:04:09 UTC (rev 1570)
@@ -254,6 +254,9 @@
 	TAILQ_HEAD(, storage)	store;
 
 	TAILQ_HEAD(, sess)	waitinglist;
+
+	time_t			lru_stamp;
+	TAILQ_ENTRY(object)	lru;
 };
 
 struct objhead {
@@ -372,6 +375,7 @@
 void EXP_Insert(struct object *o);
 void EXP_Init(void);
 void EXP_TTLchange(struct object *o);
+void EXP_Retire(struct object *o);
 
 /* cache_fetch.c */
 int Fetch(struct sess *sp);
@@ -473,6 +477,12 @@
 void VCL_Rel(struct VCL_conf **vcc);
 void VCL_Get(struct VCL_conf **vcc);
 
+/* cache_lru.c */
+void LRU_Enter(struct object *o, time_t stamp);
+void LRU_Remove(struct object *o);
+void LRU_DiscardSpace(struct sess *sp, uint64_t quota);
+void LRU_DiscardTime(struct sess *sp, time_t cutoff);
+
 #define VCL_RET_MAC(l,u,b,n)
 #define VCL_MET_MAC(l,u,b) void VCL_##l##_method(struct sess *);
 #include "vcl_returns.h"

Modified: trunk/varnish-cache/bin/varnishd/cache_expire.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_expire.c	2007-06-25 16:25:29 UTC (rev 1569)
+++ trunk/varnish-cache/bin/varnishd/cache_expire.c	2007-06-25 17:04:09 UTC (rev 1570)
@@ -74,6 +74,15 @@
 	UNLOCK(&exp_mtx);
 }
 
+void
+EXP_Retire(struct object *o)
+{
+	LOCK(&exp_mtx);
+	TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
+	VSL_stats->n_deathrow++;
+	UNLOCK(&exp_mtx);
+}
+
 /*--------------------------------------------------------------------
  * This thread monitors deathrow and kills objects when they time out.
  */
@@ -174,10 +183,7 @@
 		VCL_timeout_method(sp);
 
 		if (sp->handling == VCL_RET_DISCARD) {
-			LOCK(&exp_mtx);
-			TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
-			VSL_stats->n_deathrow++;
-			UNLOCK(&exp_mtx);
+			EXP_Retire(o);
 			continue;
 		}
 		assert(sp->handling == VCL_RET_DISCARD);

Modified: trunk/varnish-cache/bin/varnishd/cache_hash.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_hash.c	2007-06-25 16:25:29 UTC (rev 1569)
+++ trunk/varnish-cache/bin/varnishd/cache_hash.c	2007-06-25 17:04:09 UTC (rev 1570)
@@ -166,6 +166,7 @@
 	if (o != NULL) {
 		UNLOCK(&oh->mtx);
 		(void)hash->deref(oh);
+		LRU_Enter(o, sp->t_req.tv_sec);
 		return (o);
 	}
 
@@ -177,6 +178,7 @@
 	/* NB: do not deref objhead the new object inherits our reference */
 	UNLOCK(&oh->mtx);
 	BAN_NewObj(o);
+	LRU_Enter(o, sp->t_req.tv_sec);
 	return (o);
 }
 
@@ -258,6 +260,7 @@
 		free(o->vary);
 
 	HSH_Freestore(o);
+	LRU_Remove(o);
 	FREE_OBJ(o);
 	VSL_stats->n_object--;
 

Added: trunk/varnish-cache/bin/varnishd/cache_lru.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_lru.c	                        (rev 0)
+++ trunk/varnish-cache/bin/varnishd/cache_lru.c	2007-06-25 17:04:09 UTC (rev 1570)
@@ -0,0 +1,147 @@
+/*-
+ * Copyright (c) 2007 Linpro AS
+ * All rights reserved.
+ *
+ * Author: Dag-Erling Smørgrav <des at linpro.no>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "shmlog.h"
+#include "cache.h"
+#include "queue.h"
+
+/*
+ * For performance reasons, objects are only moved to the head of the LRU
+ * list when they've been in their current position for at least LRU_DELAY
+ * seconds, rather than on every access.  This should probably be a
+ * run-time parameter.
+ */
+#define LRU_DELAY 2
+
+static pthread_mutex_t lru_mtx = PTHREAD_MUTEX_INITIALIZER;
+static TAILQ_HEAD(lru_head, object) lru_list;
+
+/*
+ * Enter an object into the LRU list, or move it to the head of the list
+ * if it's already in it and hasn't moved in a while.
+ */
+void
+LRU_Enter(struct object *o, time_t stamp)
+{
+
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	assert(stamp > 0);
+	if (o->lru_stamp < stamp - LRU_DELAY && o != lru_list.tqh_first) {
+		// VSL(SLT_LRU_enter, 0, "%u %u %u", o->xid, o->lru_stamp, stamp);
+		pthread_mutex_lock(&lru_mtx);
+		if (o->lru_stamp != 0)
+			TAILQ_REMOVE(&lru_list, o, lru);
+		TAILQ_INSERT_HEAD(&lru_list, o, lru);
+		o->lru_stamp = stamp;
+		pthread_mutex_unlock(&lru_mtx);
+	}
+}
+
+/*
+ * Remove an object from the LRU list.
+ */
+void
+LRU_Remove(struct object *o)
+{
+
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	if (o->lru_stamp != 0) {
+		// VSL(SLT_LRU_remove, 0, "%u", o->xid);
+		pthread_mutex_lock(&lru_mtx);
+		TAILQ_REMOVE(&lru_list, o, lru);
+		pthread_mutex_unlock(&lru_mtx);
+	}
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and retire objects
+ * until our quota is reached or we run out of objects to retire.
+ */
+void
+LRU_DiscardSpace(struct sess *sp, uint64_t quota)
+{
+	struct object *o, *so;
+
+	pthread_mutex_lock(&lru_mtx);
+	while ((o = TAILQ_LAST(&lru_list, lru_head))) {
+		TAILQ_REMOVE(&lru_list, o, lru);
+		so = sp->obj;
+		sp->obj = o;
+		VCL_discard_method(sp);
+		sp->obj = so;
+		if (sp->handling == VCL_RET_DISCARD) {
+			/* discard: place on deathrow */
+			EXP_Retire(o);
+			o->lru_stamp = 0;
+			if (o->len > quota)
+				break;
+			quota -= o->len;
+		} else {
+			/* keep: move to front of list */
+			if ((so = TAILQ_FIRST(&lru_list)))
+				o->lru_stamp = so->lru_stamp;
+			TAILQ_INSERT_HEAD(&lru_list, o, lru);
+		}
+	}
+	pthread_mutex_unlock(&lru_mtx);
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and retire objects
+ * that haven't been accessed since the specified cutoff date.
+ */
+void
+LRU_DiscardTime(struct sess *sp, time_t cutoff)
+{
+	struct object *o, *so;
+
+	pthread_mutex_lock(&lru_mtx);
+	while ((o = TAILQ_LAST(&lru_list, lru_head))) {
+		if (o->lru_stamp >= cutoff)
+			break;
+		TAILQ_REMOVE(&lru_list, o, lru);
+		so = sp->obj;
+		sp->obj = o;
+		VCL_discard_method(sp);
+		sp->obj = so;
+		if (sp->handling == VCL_RET_DISCARD) {
+			/* discard: place on deathrow */
+			EXP_Retire(o);
+		} else {
+			/* keep: move to front of list */
+			if ((so = TAILQ_FIRST(&lru_list)) && so->lru_stamp > cutoff)
+				o->lru_stamp = so->lru_stamp;
+			else
+				o->lru_stamp = cutoff;
+			TAILQ_INSERT_HEAD(&lru_list, o, lru);
+		}
+	}
+	pthread_mutex_unlock(&lru_mtx);
+}
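
To make the LRU_DELAY comment above concrete, here is how the policy plays
out for a single object whose lru_stamp starts out at zero; the timestamps
are invented for the example:

	LRU_Enter(o, 100);	/* 0 < 100 - 2: insert at head, lru_stamp = 100 */
	LRU_Enter(o, 101);	/* 100 < 101 - 2 is false: stays put */
	LRU_Enter(o, 102);	/* 100 < 102 - 2 is false: stays put */
	LRU_Enter(o, 103);	/* 100 < 103 - 2: move to head (unless it is
				 * already there), lru_stamp = 103 */

The effect is that an object moves at most once every LRU_DELAY seconds, so
hot objects don't take the list lock on every single access.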


Property changes on: trunk/varnish-cache/bin/varnishd/cache_lru.c
___________________________________________________________________
Name: svn:keywords
   + Id



