r4218 - trunk/varnish-cache/bin/varnishd
phk at projects.linpro.no
phk at projects.linpro.no
Tue Sep 1 12:39:49 CEST 2009
Author: phk
Date: 2009-09-01 12:39:49 +0200 (Tue, 01 Sep 2009)
New Revision: 4218
Modified:
trunk/varnish-cache/bin/varnishd/cache.h
trunk/varnish-cache/bin/varnishd/cache_expire.c
trunk/varnish-cache/bin/varnishd/cache_hash.c
trunk/varnish-cache/bin/varnishd/stevedore.c
trunk/varnish-cache/bin/varnishd/stevedore.h
trunk/varnish-cache/bin/varnishd/storage_persistent.c
Log:
Give each -spersistent segment its own lru list, and prevent the
expiry thread from moving objcores from one list to another.
Modified: trunk/varnish-cache/bin/varnishd/cache.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache.h 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/cache.h 2009-09-01 10:39:49 UTC (rev 4218)
@@ -283,11 +283,12 @@
struct object *obj;
struct objhead *objhead;
double timer_when;
- unsigned char flags;
+ unsigned flags;
#define OC_F_ONLRU (1<<0)
#define OC_F_BUSY (1<<1)
#define OC_F_PASS (1<<2)
#define OC_F_PERSISTENT (1<<3)
+#define OC_F_LRUDONTMOVE (1<<4)
unsigned timer_idx;
VTAILQ_ENTRY(objcore) list;
VLIST_ENTRY(objcore) lru_list;
Modified: trunk/varnish-cache/bin/varnishd/cache_expire.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_expire.c 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/cache_expire.c 2009-09-01 10:39:49 UTC (rev 4218)
@@ -163,21 +163,33 @@
int
EXP_Touch(const struct object *o)
{
- int retval = 0;
+ int retval;
struct objcore *oc;
struct lru *lru;
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
oc = o->objcore;
if (oc == NULL)
- return (retval);
+ return (0);
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
/* We must have an objhead, otherwise we have no business on a LRU */
CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
+
+ /*
+ * For -spersistent we don't move objects on the lru list. Each
+ * segment has its own LRU list, and the order on it is not material
+ * for anything. The code below would move the objects to the
+ * LRU list of the currently open segment, which would prevent
+ * the cleaner from doing its job.
+ */
+ if (oc->flags & OC_F_LRUDONTMOVE)
+ return (0);
+
if (o->objstore == NULL) /* XXX ?? */
- return (retval);
+ return (0);
lru = STV_lru(o->objstore);
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
+ retval = 0;
if (Lck_Trylock(&exp_mtx))
return (retval);
if (oc->flags & OC_F_ONLRU) { /* XXX ?? */
Modified: trunk/varnish-cache/bin/varnishd/cache_hash.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_hash.c 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/cache_hash.c 2009-09-01 10:39:49 UTC (rev 4218)
@@ -132,6 +132,7 @@
CHECK_OBJ_NOTNULL(sp->obj, OBJECT_MAGIC);
CHECK_OBJ_NOTNULL(sp->obj->objstore, STORAGE_MAGIC);
CHECK_OBJ_NOTNULL(sp->obj->objstore->stevedore, STEVEDORE_MAGIC);
+ AN(ObjIsBusy(sp->obj));
if (sp->obj->objstore->stevedore->object != NULL)
sp->obj->objstore->stevedore->object(sp);
}
@@ -372,7 +373,10 @@
fprintf(stderr, ">\n");
}
-/**********************************************************************/
+/**********************************************************************
+ * Insert an object which magically appears out of nowhere or, more likely,
+ * comes off some persistent storage device.
+ */
struct objcore *
HSH_Insert(const struct sess *sp)
Modified: trunk/varnish-cache/bin/varnishd/stevedore.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/stevedore.c 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/stevedore.c 2009-09-01 10:39:49 UTC (rev 4218)
@@ -43,7 +43,16 @@
static const struct stevedore * volatile stv_next;
-static struct lru *
+/*********************************************************************
+ * NB! Dirty trick alert:
+ *
+ * We use a captive objcore as tail sentinel for LRU lists, but to
+ * make sure it does not get into play by accident, we do _not_
+ * initialize its magic with OBJCORE_MAGIC.
+ *
+ */
+
+struct lru *
LRU_Alloc(void)
{
struct lru *l;
@@ -55,6 +64,8 @@
return (l);
}
+/*********************************************************************/
+
struct storage *
STV_alloc(struct sess *sp, size_t size)
{
Modified: trunk/varnish-cache/bin/varnishd/stevedore.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/stevedore.h 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/stevedore.h 2009-09-01 10:39:49 UTC (rev 4218)
@@ -71,6 +71,7 @@
void STV_close(void);
struct lru *STV_lru(const struct storage *st);
+struct lru *LRU_Alloc(void);
int STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx);
uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity, const char *ctx);
Modified: trunk/varnish-cache/bin/varnishd/storage_persistent.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_persistent.c 2009-08-31 09:10:02 UTC (rev 4217)
+++ trunk/varnish-cache/bin/varnishd/storage_persistent.c 2009-09-01 10:39:49 UTC (rev 4218)
@@ -89,6 +89,7 @@
#define SMP_SEG_MAGIC 0x45c61895
struct smp_sc *sc;
+ struct lru *lru;
VTAILQ_ENTRY(smp_seg) list; /* on smp_sc.smp_segments */
@@ -276,12 +277,11 @@
{
int i;
-#if 1
+ /* XXX: round to pages */
i = msync(ctx->ss, ctx->ss->length + SHA256_LEN, MS_SYNC);
- if (i)
-fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
- ctx->ss, ctx->id, i, strerror(errno));
-#endif
+ if (i && 0)
+ fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
+ ctx->ss, ctx->id, i, strerror(errno));
}
/*--------------------------------------------------------------------
@@ -805,7 +805,6 @@
smp_load_seg(struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
{
void *ptr;
- uint64_t length;
struct smp_segment *ss;
struct smp_object *so;
struct objcore *oc;
@@ -820,7 +819,6 @@
if (smp_chk_sign(ctx))
return;
ptr = SIGN_DATA(ctx);
- length = ctx->ss->length;
ss = ptr;
so = (void*)(sc->ptr + ss->objlist);
no = ss->nalloc;
@@ -829,7 +827,7 @@
continue;
HSH_Prealloc(sp);
oc = sp->wrk->nobjcore;
- oc->flags |= OC_F_PERSISTENT;
+ oc->flags |= OC_F_PERSISTENT | OC_F_LRUDONTMOVE;
oc->flags &= ~OC_F_BUSY;
oc->obj = (void*)so;
oc->smp_seg = sg;
@@ -837,7 +835,7 @@
memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
(void)HSH_Insert(sp);
AZ(sp->wrk->nobjcore);
- EXP_Inject(oc, sc->parent->lru, so->ttl);
+ EXP_Inject(oc, sg->lru, so->ttl);
sg->nalloc++;
}
WRK_SumStat(sp->wrk);
@@ -865,6 +863,7 @@
for(; length > 0; length -= sizeof *ss, ss ++) {
ALLOC_OBJ(sg, SMP_SEG_MAGIC);
AN(sg);
+ sg->lru = LRU_Alloc();
sg->offset = ss->offset;
sg->length = ss->length;
/* XXX: check that they are inside silo */
@@ -1081,6 +1080,7 @@
CHECK_OBJ_NOTNULL(sp->obj->objstore->stevedore, STEVEDORE_MAGIC);
CAST_OBJ_NOTNULL(sc, sp->obj->objstore->priv, SMP_SC_MAGIC);
+ sp->obj->objcore->flags |= OC_F_LRUDONTMOVE;
Lck_Lock(&sc->mtx);
sg = sc->cur_seg;
sc->objreserv += sizeof *so;
More information about the varnish-commit
mailing list