r1586 - in trunk/varnish-cache: bin/varnishd include
des at projects.linpro.no
des at projects.linpro.no
Wed Jun 27 14:56:04 CEST 2007
Author: des
Date: 2007-06-27 14:56:04 +0200 (Wed, 27 Jun 2007)
New Revision: 1586
Added:
trunk/varnish-cache/bin/varnishd/stevedore.c
Modified:
trunk/varnish-cache/bin/varnishd/Makefile.am
trunk/varnish-cache/bin/varnishd/cache.h
trunk/varnish-cache/bin/varnishd/cache_expire.c
trunk/varnish-cache/bin/varnishd/cache_fetch.c
trunk/varnish-cache/bin/varnishd/cache_hash.c
trunk/varnish-cache/bin/varnishd/cache_lru.c
trunk/varnish-cache/bin/varnishd/cache_synthetic.c
trunk/varnish-cache/bin/varnishd/stevedore.h
trunk/varnish-cache/bin/varnishd/storage_file.c
trunk/varnish-cache/bin/varnishd/storage_malloc.c
trunk/varnish-cache/include/shmlog_tags.h
Log:
Mostly finish the LRU code and its integration:
- Wrap the storage code so we don't need to duplicate the "toss out some old
crap and try again" logic everywhere. This will also help when / if we
decide to add support for multiple concurrent storage arenas.
- While I'm at it, implement sma_trim().
- Rework the interaction between the LRU and expiry code. Instead of placing
objects retired by the LRU on death row, immediately terminate them.
- Give the LRU code its own fake session and worker so we don't have to pass
it a session pointer.
- Rework the LRU API, and add LRU_DiscardOne() which discards a single
object. This is what the stevedore code uses.
Known or suspected issues:
- The LRU and expiry code should use the same mutex, and / or the possibility
for races between them should be examined closely.
- LRU_Init() needs to be looked at and possibly moved.
- LRU_DiscardSpace() and LRU_DiscardTime() are unused and quite possibly useless.
- Logging and statistics related to the LRU need more attention.
- The stevedore API can probably be improved.
Modified: trunk/varnish-cache/bin/varnishd/Makefile.am
===================================================================
--- trunk/varnish-cache/bin/varnishd/Makefile.am 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/Makefile.am 2007-06-27 12:56:04 UTC (rev 1586)
@@ -42,6 +42,7 @@
mgt_vcc.c \
rfc2616.c \
shmlog.c \
+ stevedore.c \
storage_file.c \
storage_malloc.c \
tcp.c \
Modified: trunk/varnish-cache/bin/varnishd/cache.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache.h 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache.h 2007-06-27 12:56:04 UTC (rev 1586)
@@ -375,7 +375,7 @@
void EXP_Insert(struct object *o);
void EXP_Init(void);
void EXP_TTLchange(struct object *o);
-void EXP_Retire(struct object *o);
+void EXP_Terminate(struct object *o);
/* cache_fetch.c */
int Fetch(struct sess *sp);
@@ -478,10 +478,12 @@
void VCL_Get(struct VCL_conf **vcc);
/* cache_lru.c */
+// void LRU_Init(void);
void LRU_Enter(struct object *o, time_t stamp);
void LRU_Remove(struct object *o);
-void LRU_DiscardSpace(struct sess *sp, uint64_t quota);
-void LRU_DiscardTime(struct sess *sp, time_t cutoff);
+int LRU_DiscardOne(void);
+int LRU_DiscardSpace(int64_t quota);
+int LRU_DiscardTime(time_t cutoff);
#define VCL_RET_MAC(l,u,b,n)
#define VCL_MET_MAC(l,u,b) void VCL_##l##_method(struct sess *);
Modified: trunk/varnish-cache/bin/varnishd/cache_expire.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_expire.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache_expire.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -74,13 +74,25 @@
UNLOCK(&exp_mtx);
}
+/*
+ * Immediately destroy an object. Do not wait for it to expire or trickle
+ * through death row; yank it
+ */
void
-EXP_Retire(struct object *o)
+EXP_Terminate(struct object *o)
{
LOCK(&exp_mtx);
- TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
- VSL_stats->n_deathrow++;
+ if (o->lru_stamp)
+ LRU_Remove(o);
+ if (o->heap_idx)
+ binheap_delete(exp_heap, o->heap_idx);
+ if (o->deathrow.tqe_next) {
+ TAILQ_REMOVE(&exp_deathrow, o, deathrow);
+ VSL_stats->n_deathrow--;
+ }
UNLOCK(&exp_mtx);
+ VSL(SLT_Terminate, 0, "%u", o->xid);
+ HSH_Deref(o);
}
/*--------------------------------------------------------------------
@@ -183,7 +195,10 @@
VCL_timeout_method(sp);
if (sp->handling == VCL_RET_DISCARD) {
- EXP_Retire(o);
+ LOCK(&exp_mtx);
+ TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
+ VSL_stats->n_deathrow++;
+ UNLOCK(&exp_mtx);
continue;
}
assert(sp->handling == VCL_RET_DISCARD);
Modified: trunk/varnish-cache/bin/varnishd/cache_fetch.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_fetch.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache_fetch.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -59,8 +59,7 @@
if (cl == 0)
return (0);
- st = stevedore->alloc(stevedore, cl);
- XXXAN(st->stevedore);
+ st = STV_alloc(cl);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
st->len = cl;
sp->obj->len = cl;
@@ -147,11 +146,9 @@
/* Get some storage if we don't have any */
if (st == NULL || st->len == st->space) {
v = u;
- if (u < params->fetch_chunksize * 1024 &&
- stevedore->trim != NULL)
+ if (u < params->fetch_chunksize * 1024)
v = params->fetch_chunksize * 1024;
- st = stevedore->alloc(stevedore, v);
- XXXAN(st->stevedore);
+ st = STV_alloc(v);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
}
v = st->space - st->len;
@@ -198,9 +195,9 @@
if (st != NULL && st->len == 0) {
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
- } else if (st != NULL && stevedore->trim != NULL)
- stevedore->trim(st, st->len);
+ STV_free(st);
+ } else if (st != NULL)
+ STV_trim(st, st->len);
return (0);
}
@@ -226,9 +223,7 @@
st = NULL;
while (1) {
if (v == 0) {
- st = stevedore->alloc(stevedore,
- params->fetch_chunksize * 1024);
- XXXAN(st->stevedore);
+ st = STV_alloc(params->fetch_chunksize * 1024);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
p = st->ptr + st->len;
v = st->space - st->len;
@@ -248,9 +243,9 @@
if (st->len == 0) {
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
- } else if (stevedore->trim != NULL)
- stevedore->trim(st, st->len);
+ STV_free(st);
+ } else
+ STV_trim(st, st->len);
return (1);
}
@@ -345,7 +340,7 @@
while (!TAILQ_EMPTY(&sp->obj->store)) {
st = TAILQ_FIRST(&sp->obj->store);
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
+ STV_free(st);
}
close(vc->fd);
VBE_ClosedFd(sp->wrk, vc, 1);
Modified: trunk/varnish-cache/bin/varnishd/cache_hash.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_hash.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache_hash.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -104,7 +104,7 @@
TAILQ_FOREACH_SAFE(st, &o->store, list, stn) {
CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
TAILQ_REMOVE(&o->store, st, list);
- st->stevedore->free(st);
+ STV_free(st);
}
}
@@ -260,7 +260,6 @@
free(o->vary);
HSH_Freestore(o);
- LRU_Remove(o);
FREE_OBJ(o);
VSL_stats->n_object--;
Modified: trunk/varnish-cache/bin/varnishd/cache_lru.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_lru.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache_lru.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -40,10 +40,33 @@
*/
#define LRU_DELAY 2
+TAILQ_HEAD(lru_head, object);
+
+static struct lru_head lru_list = TAILQ_HEAD_INITIALIZER(lru_list);
static pthread_mutex_t lru_mtx = PTHREAD_MUTEX_INITIALIZER;
-static TAILQ_HEAD(lru_head, object) lru_list;
+static struct sess *lru_session;
+static struct worker lru_worker;
/*
+ * Initialize the LRU data structures.
+ */
+static inline void
+LRU_Init(void)
+{
+ if (lru_session == NULL) {
+ lru_session = SES_New(NULL, 0);
+ XXXAN(lru_session);
+ lru_session->wrk = &lru_worker;
+ lru_worker.magic = WORKER_MAGIC;
+ lru_worker.wlp = lru_worker.wlog;
+ lru_worker.wle = lru_worker.wlog + sizeof lru_worker.wlog;
+ VCL_Get(&lru_session->vcl);
+ } else {
+ VCL_Refresh(&lru_session->vcl);
+ }
+}
+
+/*
* Enter an object into the LRU list, or move it to the head of the list
* if it's already in it and hasn't moved in a while.
*/
@@ -55,12 +78,12 @@
assert(stamp > 0);
if (o->lru_stamp < stamp - LRU_DELAY && o != lru_list.tqh_first) {
// VSL(SLT_LRU_enter, 0, "%u %u %u", o->xid, o->lru_stamp, stamp);
- pthread_mutex_lock(&lru_mtx);
+ LOCK(&lru_mtx);
if (o->lru_stamp != 0)
TAILQ_REMOVE(&lru_list, o, lru);
TAILQ_INSERT_HEAD(&lru_list, o, lru);
o->lru_stamp = stamp;
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
}
}
@@ -74,74 +97,123 @@
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
if (o->lru_stamp != 0) {
// VSL(SLT_LRU_remove, 0, "%u", o->xid);
- pthread_mutex_lock(&lru_mtx);
+ LOCK(&lru_mtx);
TAILQ_REMOVE(&lru_list, o, lru);
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
}
}
/*
+ * With the LRU lock held, call VCL_discard(). Depending on the result,
+ * either insert the object at the head of the list or dereference it.
+ */
+static int
+LRU_DiscardLocked(struct object *o)
+{
+ struct object *so;
+
+ if (o->busy)
+ return (0);
+
+ /* XXX this is a really bad place to do this */
+ LRU_Init();
+
+ CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+ TAILQ_REMOVE(&lru_list, o, lru);
+
+ lru_session->obj = o;
+ VCL_discard_method(lru_session);
+
+ if (lru_session->handling == VCL_RET_DISCARD) {
+ /* discard: release object */
+ VSL(SLT_ExpKill, 0, "%u %d", o->xid, o->lru_stamp);
+ o->lru_stamp = 0;
+ EXP_Terminate(o);
+ return (1);
+ } else {
+ /* keep: move to front of list */
+ if ((so = TAILQ_FIRST(&lru_list)))
+ o->lru_stamp = so->lru_stamp;
+ TAILQ_INSERT_HEAD(&lru_list, o, lru);
+ return (0);
+ }
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and check each object
+ * until we find one that can be retired. Return the number of objects
+ * that were discarded.
+ */
+int
+LRU_DiscardOne(void)
+{
+ struct object *first = TAILQ_FIRST(&lru_list);
+ struct object *o;
+ int count = 0;
+
+ LOCK(&lru_mtx);
+ while (!count && (o = TAILQ_LAST(&lru_list, lru_head))) {
+ if (LRU_DiscardLocked(o))
+ ++count;
+ if (o == first) {
+ /* full circle */
+ break;
+ }
+ }
+ UNLOCK(&lru_mtx);
+ return (0);
+}
+
+/*
* Walk through the LRU list, starting at the back, and retire objects
- * until our quota is reached or we run out of objects to retire.
+ * until our quota is reached or we run out of objects to retire. Return
+ * the number of objects that were discarded.
*/
-void
-LRU_DiscardSpace(struct sess *sp, uint64_t quota)
+int
+LRU_DiscardSpace(int64_t quota)
{
- struct object *o, *so;
+ struct object *first = TAILQ_FIRST(&lru_list);
+ struct object *o;
+ unsigned int len;
+ int count = 0;
- pthread_mutex_lock(&lru_mtx);
- while ((o = TAILQ_LAST(&lru_list, lru_head))) {
- TAILQ_REMOVE(&lru_list, o, lru);
- so = sp->obj;
- sp->obj = o;
- VCL_discard_method(sp);
- sp->obj = so;
- if (sp->handling == VCL_RET_DISCARD) {
- /* discard: place on deathrow */
- EXP_Retire(o);
- o->lru_stamp = 0;
- if (o->len > quota)
- break;
- quota -= o->len;
- } else {
- /* keep: move to front of list */
- if ((so = TAILQ_FIRST(&lru_list)))
- o->lru_stamp = so->lru_stamp;
- TAILQ_INSERT_HEAD(&lru_list, o, lru);
+ LOCK(&lru_mtx);
+ while (quota > 0 && (o = TAILQ_LAST(&lru_list, lru_head))) {
+ len = o->len;
+ if (LRU_DiscardLocked(o)) {
+ quota -= len;
+ ++count;
}
+ if (o == first) {
+ /* full circle */
+ break;
+ }
}
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
+ return (count);
}
/*
* Walk through the LRU list, starting at the back, and retire objects
- * that haven't been accessed since the specified cutoff date.
+ * that haven't been accessed since the specified cutoff date. Return the
+ * number of objects that were discarded.
*/
-void
-LRU_DiscardTime(struct sess *sp, time_t cutoff)
+int
+LRU_DiscardTime(time_t cutoff)
{
- struct object *o, *so;
+ struct object *first = TAILQ_FIRST(&lru_list);
+ struct object *o;
+ int count = 0;
- pthread_mutex_lock(&lru_mtx);
- while ((o = TAILQ_LAST(&lru_list, lru_head))) {
- if (o->lru_stamp >= cutoff)
+ LOCK(&lru_mtx);
+ while ((o = TAILQ_LAST(&lru_list, lru_head)) && o->lru_stamp <= cutoff) {
+ if (LRU_DiscardLocked(o))
+ ++count;
+ if (o == first) {
+ /* full circle */
break;
- TAILQ_REMOVE(&lru_list, o, lru);
- so = sp->obj;
- sp->obj = o;
- VCL_discard_method(sp);
- sp->obj = so;
- if (sp->handling == VCL_RET_DISCARD) {
- /* discard: place on deathrow */
- EXP_Retire(o);
- } else {
- /* keep: move to front of list */
- if ((so = TAILQ_FIRST(&lru_list)) && so->lru_stamp > cutoff)
- o->lru_stamp = so->lru_stamp;
- else
- o->lru_stamp = cutoff;
- TAILQ_INSERT_HEAD(&lru_list, o, lru);
}
}
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
+ return (count);
}
Modified: trunk/varnish-cache/bin/varnishd/cache_synthetic.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_synthetic.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/cache_synthetic.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -90,7 +90,7 @@
/* allocate space for body */
/* XXX what if the object already has a body? */
- st = stevedore->alloc(stevedore, 1024);
+ st = STV_alloc(1024);
XXXAN(st->stevedore);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
Added: trunk/varnish-cache/bin/varnishd/stevedore.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/stevedore.c (rev 0)
+++ trunk/varnish-cache/bin/varnishd/stevedore.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2007 Linpro AS
+ * All rights reserved.
+ *
+ * Author: Dag-Erling Smørgrav <des at linpro.no>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "cache.h"
+
+struct storage *
+STV_alloc(size_t size)
+{
+ struct storage *st;
+
+ AN(stevedore);
+ AN(stevedore->alloc);
+ do {
+ if ((st = stevedore->alloc(stevedore, size)) == NULL)
+ LRU_DiscardOne();
+ } while (st == NULL);
+ return (st);
+}
+
+void
+STV_trim(struct storage *st, size_t size)
+{
+
+ AN(st->stevedore);
+ if (st->stevedore->trim)
+ st->stevedore->trim(st, size);
+}
+
+void
+STV_free(struct storage *st)
+{
+
+ AN(st->stevedore);
+ AN(stevedore->free);
+ st->stevedore->free(st);
+}
Modified: trunk/varnish-cache/bin/varnishd/stevedore.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/stevedore.h 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/stevedore.h 2007-06-27 12:56:04 UTC (rev 1586)
@@ -50,3 +50,7 @@
/* private fields */
void *priv;
};
+
+struct storage *STV_alloc(size_t size);
+void STV_trim(struct storage *st, size_t size);
+void STV_free(struct storage *st);
Modified: trunk/varnish-cache/bin/varnishd/storage_file.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_file.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/storage_file.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -631,6 +631,10 @@
LOCK(&sc->mtx);
VSL_stats->sm_nreq++;
smf = alloc_smf(sc, size);
+ if (smf == NULL) {
+ UNLOCK(&sc->mtx);
+ return (NULL);
+ }
CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
VSL_stats->sm_nobj++;
VSL_stats->sm_balloc += smf->size;
Modified: trunk/varnish-cache/bin/varnishd/storage_malloc.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_malloc.c 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/bin/varnishd/storage_malloc.c 2007-06-27 12:56:04 UTC (rev 1586)
@@ -49,7 +49,8 @@
VSL_stats->sm_nreq++;
sma = calloc(sizeof *sma, 1);
- XXXAN(sma);
+ if (sma == NULL)
+ return (NULL);
sma->s.priv = sma;
sma->s.ptr = malloc(size);
XXXAN(sma->s.ptr);
@@ -68,6 +69,7 @@
{
struct sma *sma;
+ CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
sma = s->priv;
VSL_stats->sm_nobj--;
VSL_stats->sm_balloc -= sma->s.space;
@@ -75,8 +77,25 @@
free(sma);
}
+static void
+sma_trim(struct storage *s, size_t size)
+{
+ struct sma *sma;
+ void *p;
+
+ CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+ sma = s->priv;
+ if ((p = realloc(sma->s.ptr, size)) != NULL) {
+ VSL_stats->sm_balloc -= sma->s.space;
+ sma->s.ptr = p;
+ sma->s.space = size;
+ VSL_stats->sm_balloc += sma->s.space;
+ }
+}
+
struct stevedore sma_stevedore = {
.name = "malloc",
.alloc = sma_alloc,
- .free = sma_free
+ .free = sma_free,
+ .trim = sma_trim,
};
Modified: trunk/varnish-cache/include/shmlog_tags.h
===================================================================
--- trunk/varnish-cache/include/shmlog_tags.h 2007-06-27 12:43:08 UTC (rev 1585)
+++ trunk/varnish-cache/include/shmlog_tags.h 2007-06-27 12:56:04 UTC (rev 1586)
@@ -92,3 +92,4 @@
SLTM(ExpPick)
SLTM(ExpKill)
SLTM(WorkThread)
+SLTM(Terminate)
More information about the varnish-commit
mailing list