[master] 052473d Split the silo-centric from the policy parts of persistent storage code.

Poul-Henning Kamp phk at varnish-cache.org
Tue Feb 8 11:50:41 CET 2011


commit 052473ddffd2ef94345e06abe3d864bd66ed479e
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Tue Feb 8 10:49:59 2011 +0000

    Split the silo-centric from the policy parts of persistent storage code.
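
    The mechanical split: the segment lifecycle and objcore code
    (smp_save_seg()/smp_save_segs(), smp_load_seg(), smp_new_seg(),
    smp_close_seg(), smp_find_so(), smp_loaded_st() and the smp_oc_*
    objcore methods) moves into the new storage_persistent_silo.c,
    while the policy side (the ban journal, smp_open_segs(), the silo
    worker thread, the allocators and stevedore methods, and the CLI
    debug commands) stays in storage_persistent.c.  The seam is the
    new smp_init_oc() entry point, plus smp_segend()/smp_spaceleft()
    becoming static inlines shared via storage_persistent.h, while
    smp_new_sign() becomes private to storage_persistent_subr.c.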

diff --git a/bin/varnishd/Makefile.am b/bin/varnishd/Makefile.am
index 208be08..b8db6e0 100644
--- a/bin/varnishd/Makefile.am
+++ b/bin/varnishd/Makefile.am
@@ -65,6 +65,7 @@ varnishd_SOURCES = \
 	storage_malloc.c \
 	storage_persistent.c \
 	storage_persistent_mgt.c \
+	storage_persistent_silo.c \
 	storage_persistent_subr.c \
 	storage_synth.c \
 	storage_umem.c \
diff --git a/bin/varnishd/storage_persistent.c b/bin/varnishd/storage_persistent.c
index 4a971cb..1ae9e80 100644
--- a/bin/varnishd/storage_persistent.c
+++ b/bin/varnishd/storage_persistent.c
@@ -66,262 +66,6 @@ SVNID("$Id$")
 static VTAILQ_HEAD(,smp_sc)	silos = VTAILQ_HEAD_INITIALIZER(silos);
 
 /*--------------------------------------------------------------------
- * Write the segmentlist back to the silo.
- *
- * We write the first copy, sync it synchronously, then write the
- * second copy and sync it synchronously.
- *
- * Provided the kernel doesn't lie, that means we will always have
- * at least one valid copy in the silo.
- */
-
-static void
-smp_save_seg(const struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	struct smp_segptr *ss;
-	struct smp_seg *sg;
-	uint64_t length;
-
-	Lck_AssertHeld(&sc->mtx);
-	smp_reset_sign(ctx);
-	ss = SIGN_DATA(ctx);
-	length = 0;
-	VTAILQ_FOREACH(sg, &sc->segments, list) {
-		assert(sg->p.offset < sc->mediasize);
-		assert(sg->p.offset + sg->p.length <= sc->mediasize);
-		*ss = sg->p;
-		ss++;
-		length += sizeof *ss;
-	}
-	smp_append_sign(ctx, SIGN_DATA(ctx), length);
-	smp_sync_sign(ctx);
-}
-
-static void
-smp_save_segs(struct smp_sc *sc)
-{
-	struct smp_seg *sg, *sg2;
-
-	Lck_AssertHeld(&sc->mtx);
-
-	/*
-	 * Remove empty segments from the front of the list
-	 * before we write the segments to disk.
-	 */
-	VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
-		if (sg->nobj > 0)
-			break;
-		if (sg == sc->cur_seg)
-			continue;
-		VTAILQ_REMOVE(&sc->segments, sg, list);
-		free(sg);
-	}
-	smp_save_seg(sc, &sc->seg1);
-	smp_save_seg(sc, &sc->seg2);
-}
-
-
-/*---------------------------------------------------------------------
- */
-
-static struct smp_object *
-smp_find_so(const struct smp_seg *sg, const struct objcore *oc)
-{
-	struct smp_object *so;
-	unsigned smp_idx;
-
-	smp_idx = oc->priv2;
-	assert(smp_idx > 0);
-	assert(smp_idx <= sg->p.lobjlist);
-	so = &sg->objs[sg->p.lobjlist - smp_idx];
-	return (so);
-}
-
-/*---------------------------------------------------------------------
- * Check if a given storage structure is valid to use
- */
-
-static int
-smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
-    const struct storage *st)
-{
-	struct smp_seg *sg2;
-	const uint8_t *pst;
-	uint64_t o;
-
-	(void)sg;		/* XXX: faster: Start search from here */
-	pst = (const void *)st;
-
-	if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
-		return (0x01);		/* Before silo payload start */
-	if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
-		return (0x02);		/* After silo end */
-
-	o = pst - sc->base;
-
-	/* Find which segment contains the storage structure */
-	VTAILQ_FOREACH(sg2, &sc->segments, list)
-		if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
-			break;
-	if (sg2 == NULL)
-		return (0x04);		/* No claiming segment */
-	if (!(sg2->flags & SMP_SEG_LOADED))
-		return (0x08);		/* Claiming segment not loaded */
-
-	/* It is now safe to access the storage structure */
-	if (st->magic != STORAGE_MAGIC)
-		return (0x10);		/* Not enough magic */
-
-	if (o + st->space >= sg2->p.objlist)
-		return (0x20);		/* Allocation not inside segment */
-
-	if (st->len > st->space)
-		return (0x40);		/* Plain bad... */
-
-	/*
-	 * XXX: We could patch up st->stevedore and st->priv here
-	 * XXX: but if things go right, we will never need them.
-	 */
-	return (0);
-}
-
-/*---------------------------------------------------------------------
- * objcore methods for persistent objects
- */
-
-static struct object *
-smp_oc_getobj(struct worker *wrk, struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	struct storage *st;
-	uint64_t l;
-	int bad;
-
-	/* Some calls are direct, but they should match anyway */
-	assert(oc->methods->getobj == smp_oc_getobj);
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	if (wrk == NULL)
-		AZ(oc->flags & OC_F_NEEDFIXUP);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	o = (void*)(sg->sc->base + so->ptr);
-	/*
-	 * The object may not be in this segment since we allocate it
-	 * in a separate operation from the smp_object.  We could check
-	 * that it is in a later segment, but that would be complicated.
-	 * XXX: For now, be happy if it is inside the silo
-	 */
-	ASSERT_PTR_IN_SILO(sg->sc, o);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-
-	/*
-	 * If this flag is not set, it never will be, so the lock is not
-	 * needed to test it.
-	 */
-	if (!(oc->flags & OC_F_NEEDFIXUP))
-		return (o);
-
-	AN(wrk);
-	Lck_Lock(&sg->sc->mtx);
-	/* Check again, we might have raced. */
-	if (oc->flags & OC_F_NEEDFIXUP) {
-		/* We trust caller to have a refcnt for us */
-		o->objcore = oc;
-
-		bad = 0;
-		l = 0;
-		VTAILQ_FOREACH(st, &o->store, list) {
-			bad |= smp_loaded_st(sg->sc, sg, st);
-			if (bad)
-				break;
-			l += st->len;
-		}
-		if (l != o->len)
-			bad |= 0x100;
-
-		if(bad) {
-			o->ttl = 0;
-			o->grace = 0;
-			so->ttl = 0;
-		}
-
-		sg->nfixed++;
-		wrk->stats.n_object++;
-		wrk->stats.n_vampireobject--;
-		oc->flags &= ~OC_F_NEEDFIXUP;
-	}
-	Lck_Unlock(&sg->sc->mtx);
-	return (o);
-}
-
-static void
-smp_oc_updatemeta(struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	double mttl;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	o = smp_oc_getobj(NULL, oc);
-	AN(o);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	if (isnan(o->grace))
-		mttl = o->ttl;
-	else
-		mttl = - (o->ttl + o->grace);
-
-	if (sg == sg->sc->cur_seg) {
-		/* Lock necessary, we might race close_seg */
-		Lck_Lock(&sg->sc->mtx);
-		so->ban = o->ban_t;
-		so->ttl = mttl;
-		Lck_Unlock(&sg->sc->mtx);
-	} else {
-		so->ban = o->ban_t;
-		so->ttl = mttl;
-	}
-}
-
-static void __match_proto__()
-smp_oc_freeobj(struct objcore *oc)
-{
-	struct smp_seg *sg;
-	struct smp_object *so;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	Lck_Lock(&sg->sc->mtx);
-	so->ttl = 0;
-	so->ptr = 0;
-
-	assert(sg->nobj > 0);
-	assert(sg->nfixed > 0);
-	sg->nobj--;
-	sg->nfixed--;
-
-	Lck_Unlock(&sg->sc->mtx);
-}
-
-static struct objcore_methods smp_oc_methods = {
-	.getobj =		smp_oc_getobj,
-	.updatemeta =		smp_oc_updatemeta,
-	.freeobj =		smp_oc_freeobj,
-};
-
-/*--------------------------------------------------------------------
  * Add a new ban to all silos
  */
 
@@ -422,91 +166,6 @@ smp_open_bans(struct smp_sc *sc, struct smp_signctx *ctx)
 }
 
 
-/*--------------------------------------------------------------------*/
-
-static uint64_t
-smp_segend(const struct smp_seg *sg)
-{
-
-	return (sg->p.offset + sg->p.length);
-}
-
-static uint64_t
-smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
-{
-
-	IASSERTALIGN(sc, sc->next_bot);
-	assert(sc->next_bot <= sc->next_top - IRNUP(sc, SMP_SIGN_SPACE));
-	assert(sc->next_bot >= sg->p.offset);
-	assert(sc->next_top < sg->p.offset + sg->p.length);
-	return ((sc->next_top - sc->next_bot) - IRNUP(sc, SMP_SIGN_SPACE));
-}
-
-/*--------------------------------------------------------------------
- * Load segments
- *
- * The overall objective is to register the existence of an object, based
- * only on the minimally sized struct smp_object, without causing the
- * main object to be faulted in.
- *
- * XXX: We can test this by mprotecting the main body of the segment
- * XXX: until the first fixup happens, or even just over this loop.
- * XXX: However, this requires that the smp_objects start further
- * XXX: into the segment than a page so that they do not get hit
- * XXX: by the protection.
- */
-
-static void
-smp_load_seg(const struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
-{
-	struct smp_object *so;
-	struct objcore *oc;
-	uint32_t no;
-	double t_now = TIM_real();
-	struct smp_signctx ctx[1];
-
-	ASSERT_SILO_THREAD(sc);
-	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
-	CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
-	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-	assert(sg->flags & SMP_SEG_MUSTLOAD);
-	sg->flags &= ~SMP_SEG_MUSTLOAD;
-	AN(sg->p.offset);
-	if (sg->p.objlist == 0)
-		return;
-	smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
-	if (smp_chk_sign(ctx))
-		return;
-
-	/* test SEGTAIL */
-	/* test OBJIDX */
-	so = (void*)(sc->base + sg->p.objlist);
-	sg->objs = so;
-	no = sg->p.lobjlist;
-	/* Clear the bogus "hold" count */
-	sg->nobj = 0;
-	for (;no > 0; so++,no--) {
-		if (so->ttl > 0 && so->ttl < t_now)
-			continue;
-		if (so->ttl < 0 && -so->ttl < t_now)
-			continue;
-		HSH_Prealloc(sp);
-		oc = sp->wrk->nobjcore;
-		oc->flags |= OC_F_NEEDFIXUP | OC_F_LRUDONTMOVE;
-		oc->flags &= ~OC_F_BUSY;
-		oc->priv = sg;
-		oc->priv2 = no;
-		oc->methods = &smp_oc_methods;
-		oc->ban = BAN_RefBan(oc, so->ban, sc->tailban);
-		memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
-		(void)HSH_Insert(sp);
-		AZ(sp->wrk->nobjcore);
-		EXP_Inject(oc, sg->lru, fabs(so->ttl));
-		sg->nobj++;
-	}
-	WRK_SumStat(sp->wrk);
-	sg->flags |= SMP_SEG_LOADED;
-}
 
 /*--------------------------------------------------------------------
  * Attempt to open and read in a segment list
@@ -629,142 +288,6 @@ smp_open_segs(struct smp_sc *sc, struct smp_signctx *ctx)
 }
 
 /*--------------------------------------------------------------------
- * Create a new segment
- */
-
-static void
-smp_new_seg(struct smp_sc *sc)
-{
-	struct smp_seg *sg, *sg2;
-
-	Lck_AssertHeld(&sc->mtx);
-	ALLOC_OBJ(sg, SMP_SEG_MAGIC);
-	AN(sg);
-	sg->sc = sc;
-	sg->lru = LRU_Alloc();
-	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-
-	/* XXX: find where it goes in silo */
-
-	sg->p.offset = sc->free_offset;
-	/* XXX: align */
-	assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
-	assert(sg->p.offset < sc->mediasize);
-
-	sg->p.length = sc->aim_segl;
-	sg->p.length &= ~7;
-
-	if (smp_segend(sg) > sc->mediasize) {
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		sg->p.offset = sc->free_offset;
-		sg2 = VTAILQ_FIRST(&sc->segments);
-		if (smp_segend(sg) > sg2->p.offset) {
-			printf("Out of space in persistent silo\n");
-			printf("Committing suicide, restart will make space\n");
-			exit (0);
-		}
-	}
-
-
-	assert(smp_segend(sg) <= sc->mediasize);
-
-	sg2 = VTAILQ_FIRST(&sc->segments);
-	if (sg2 != NULL && sg2->p.offset > sc->free_offset) {
-		if (smp_segend(sg) > sg2->p.offset) {
-			printf("Out of space in persistent silo\n");
-			printf("Committing suicide, restart will make space\n");
-			exit (0);
-		}
-		assert(smp_segend(sg) <= sg2->p.offset);
-	}
-
-	sg->p.offset = IRNUP(sc, sg->p.offset);
-	sg->p.length = IRNDN(sc, sg->p.length);
-	sc->free_offset = sg->p.offset + sg->p.length;
-
-	VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
-
-	/* Neuter the new segment in case there is an old one there */
-	AN(sg->p.offset);
-	smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Set up our allocation points */
-	sc->cur_seg = sg;
-	sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
-	sc->next_top = smp_segend(sg);
-	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
-	IASSERTALIGN(sc, sc->next_bot);
-	IASSERTALIGN(sc, sc->next_top);
-	sg->objs = (void*)(sc->base + sc->next_top);
-}
-
-/*--------------------------------------------------------------------
- * Close a segment
- */
-
-static void
-smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
-{
-	uint64_t left, dst, len;
-	void *dp;
-
-	Lck_AssertHeld(&sc->mtx);
-
-	assert(sg == sc->cur_seg);
-	AN(sg->p.offset);
-	sc->cur_seg = NULL;
-
-	if (sg->nalloc == 0) {
-		/* XXX: if segment is empty, delete instead */
-		VTAILQ_REMOVE(&sc->segments, sg, list);
-		free(sg);
-		return;
-	}
-
-	/*
-	 * If there is enough space left that we can move the smp_objects
-	 * down without overwriting the present copy, we will do so to
-	 * compact the segment.
-	 */
-	left = smp_spaceleft(sc, sg);
-	len = sizeof(struct smp_object) * sg->p.lobjlist;
-	if (len < left) {
-		dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
-		dp = sc->base + dst;
-		assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
-		memcpy(dp, sg->objs, len);
-		sc->next_top = dst;
-		sg->objs = dp;
-		sg->p.length = (sc->next_top - sg->p.offset)
-		     + len + IRNUP(sc, SMP_SIGN_SPACE);
-		(void)smp_spaceleft(sc, sg);	/* for the asserts */
-
-	}
-
-	/* Update the segment header */
-	sg->p.objlist = sc->next_top;
-
-	/* Write the (empty) OBJIDX signature */
-	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
-	assert(sc->next_top >= sc->next_bot);
-	smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Write the (empty) SEGTAIL signature */
-	smp_def_sign(sc, sg->ctx,
-	    sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Save segment list */
-	smp_save_segs(sc);
-	sc->free_offset = smp_segend(sg);
-}
-
-/*--------------------------------------------------------------------
  * Silo worker thread
  */
 
@@ -1006,9 +529,7 @@ smp_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
 	so->ptr = (uint8_t*)o - sc->base;
 	so->ban = o->ban_t;
 
-	oc->priv = sg;
-	oc->priv2 = objidx;
-	oc->methods = &smp_oc_methods;
+	smp_init_oc(oc, sg, objidx);
 
 	Lck_Unlock(&sc->mtx);
 	return (o);
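
The hunk above is the policy half of the seam: smp_allocobj() no longer
pokes oc->priv, oc->priv2 and oc->methods itself, it calls smp_init_oc(),
so only the silo file needs to know about smp_oc_methods.  A minimal,
self-contained sketch of that vtable hand-off (types reduced to the
fields involved; these are not the real cache.h declarations):

	#include <stdio.h>

	struct objcore;

	struct objcore_methods {
		void (*updatemeta)(struct objcore *oc);
		void (*freeobj)(struct objcore *oc);
	};

	struct objcore {
		void		*priv;	/* owning segment (struct smp_seg) */
		unsigned	priv2;	/* index in the segment's obj list */
		const struct objcore_methods *methods;
	};

	static void demo_updatemeta(struct objcore *oc) { (void)oc; }
	static void demo_freeobj(struct objcore *oc) { (void)oc; }

	static const struct objcore_methods demo_oc_methods = {
		.updatemeta =	demo_updatemeta,
		.freeobj =	demo_freeobj,
	};

	/* The single wiring point, as smp_init_oc() now is for both
	 * freshly allocated objects and objects resurrected by
	 * smp_load_seg(). */
	static void
	demo_init_oc(struct objcore *oc, void *sg, unsigned objidx)
	{

		oc->priv = sg;
		oc->priv2 = objidx;
		oc->methods = &demo_oc_methods;
	}

	int
	main(void)
	{
		struct objcore oc;

		demo_init_oc(&oc, NULL, 1);
		oc.methods->updatemeta(&oc);	/* dispatch via the vtable */
		printf("objidx %u\n", oc.priv2);
		return (0);
	}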
diff --git a/bin/varnishd/storage_persistent.h b/bin/varnishd/storage_persistent.h
index 8184588..35affb6 100644
--- a/bin/varnishd/storage_persistent.h
+++ b/bin/varnishd/storage_persistent.h
@@ -174,20 +174,26 @@ struct smp_sc {
 #define SIGN_DATA(ctx)	((void *)((ctx)->ss + 1))
 #define SIGN_END(ctx)	((void *)((int8_t *)SIGN_DATA(ctx) + (ctx)->ss->length))
 
-/* storage_persistent.c */
-
 /* storage_persistent_mgt.c */
+
 void smp_mgt_init(struct stevedore *parent, int ac, char * const *av);
 
+/* storage_persistent_silo.c */
+
+void smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
+    struct smp_seg *sg);
+void smp_new_seg(struct smp_sc *sc);
+void smp_close_seg(struct smp_sc *sc, struct smp_seg *sg);
+void smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx);
+
 /* storage_persistent_subr.c */
+
 void smp_def_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
     uint64_t off, const char *id);
 int smp_chk_sign(struct smp_signctx *ctx);
 void smp_append_sign(struct smp_signctx *ctx, const void *ptr, uint32_t len);
 void smp_reset_sign(struct smp_signctx *ctx);
 void smp_sync_sign(const struct smp_signctx *ctx);
-void smp_new_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
-    uint64_t off, const char *id);
 void smp_newsilo(struct smp_sc *sc);
 int smp_valid_silo(struct smp_sc *sc);
 
@@ -206,6 +212,20 @@ smp_stuff_len(const struct smp_sc *sc, unsigned stuff)
 	return (l);
 }
 
+static inline uint64_t
+smp_segend(const struct smp_seg *sg)
+{
 
+	return (sg->p.offset + sg->p.length);
+}
 
+static inline uint64_t
+smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
+{
 
+	IASSERTALIGN(sc, sc->next_bot);
+	assert(sc->next_bot <= sc->next_top - IRNUP(sc, SMP_SIGN_SPACE));
+	assert(sc->next_bot >= sg->p.offset);
+	assert(sc->next_top < sg->p.offset + sg->p.length);
+	return ((sc->next_top - sc->next_bot) - IRNUP(sc, SMP_SIGN_SPACE));
+}
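
smp_segend() and smp_spaceleft() are promoted to static inlines here so
that both storage_persistent.c and the new silo file can share the
segment-space arithmetic: next_bot grows upwards, next_top grows
downwards, and one signature's worth of space stays reserved between
them.  A toy model of that accounting (SIGN_SPACE stands in for
IRNUP(sc, SMP_SIGN_SPACE), and the allocation points live in the
segment here rather than in struct smp_sc; the real code also asserts
alignment and segment bounds):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SIGN_SPACE	64	/* stand-in signature size */

	struct seg {
		uint64_t	offset;		/* segment start in silo */
		uint64_t	length;		/* total segment size */
		uint64_t	next_bot;	/* bottom alloc point, grows up */
		uint64_t	next_top;	/* top alloc point, grows down */
	};

	/* Like smp_segend(): first byte after the segment. */
	static inline uint64_t
	segend(const struct seg *sg)
	{

		return (sg->offset + sg->length);
	}

	/* Like smp_spaceleft(): bytes between the allocation points,
	 * minus room for the signature that closes the object index. */
	static inline uint64_t
	spaceleft(const struct seg *sg)
	{

		assert(sg->next_bot <= sg->next_top - SIGN_SPACE);
		return ((sg->next_top - sg->next_bot) - SIGN_SPACE);
	}

	int
	main(void)
	{
		struct seg sg = { .offset = 4096, .length = 1 << 20 };

		/* Set up as smp_new_seg() does: a signature at each end. */
		sg.next_bot = sg.offset + SIGN_SPACE;
		sg.next_top = segend(&sg) - SIGN_SPACE;
		printf("free bytes: %ju\n", (uintmax_t)spaceleft(&sg));
		return (0);
	}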
diff --git a/bin/varnishd/storage_persistent_silo.c b/bin/varnishd/storage_persistent_silo.c
index 4a971cb..480474e 100644
--- a/bin/varnishd/storage_persistent_silo.c
+++ b/bin/varnishd/storage_persistent_silo.c
@@ -38,33 +38,20 @@
 #include "svnid.h"
 SVNID("$Id$")
 
-#include <errno.h>
 #include <math.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
-#include <string.h>
-#include <sys/param.h>
 #include <sys/mman.h>
 
 #include "cache.h"
 #include "stevedore.h"
 #include "hash_slinger.h"
 #include "vsha256.h"
-#include "cli.h"
-#include "cli_priv.h"
 
 #include "persistent.h"
 #include "storage_persistent.h"
 
-/*--------------------------------------------------------------------*/
-
-/*
- * silos is unlocked; it only changes during startup when we are
- * single-threaded
- */
-static VTAILQ_HEAD(,smp_sc)	silos = VTAILQ_HEAD_INITIALIZER(silos);
-
 /*--------------------------------------------------------------------
  * Write the segmentlist back to the silo.
  *
@@ -120,328 +107,6 @@ smp_save_segs(struct smp_sc *sc)
 	smp_save_seg(sc, &sc->seg2);
 }
 
-
-/*---------------------------------------------------------------------
- */
-
-static struct smp_object *
-smp_find_so(const struct smp_seg *sg, const struct objcore *oc)
-{
-	struct smp_object *so;
-	unsigned smp_idx;
-
-	smp_idx = oc->priv2;
-	assert(smp_idx > 0);
-	assert(smp_idx <= sg->p.lobjlist);
-	so = &sg->objs[sg->p.lobjlist - smp_idx];
-	return (so);
-}
-
-/*---------------------------------------------------------------------
- * Check if a given storage structure is valid to use
- */
-
-static int
-smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
-    const struct storage *st)
-{
-	struct smp_seg *sg2;
-	const uint8_t *pst;
-	uint64_t o;
-
-	(void)sg;		/* XXX: faster: Start search from here */
-	pst = (const void *)st;
-
-	if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
-		return (0x01);		/* Before silo payload start */
-	if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
-		return (0x02);		/* After silo end */
-
-	o = pst - sc->base;
-
-	/* Find which segment contains the storage structure */
-	VTAILQ_FOREACH(sg2, &sc->segments, list)
-		if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
-			break;
-	if (sg2 == NULL)
-		return (0x04);		/* No claiming segment */
-	if (!(sg2->flags & SMP_SEG_LOADED))
-		return (0x08);		/* Claiming segment not loaded */
-
-	/* It is now safe to access the storage structure */
-	if (st->magic != STORAGE_MAGIC)
-		return (0x10);		/* Not enough magic */
-
-	if (o + st->space >= sg2->p.objlist)
-		return (0x20);		/* Allocation not inside segment */
-
-	if (st->len > st->space)
-		return (0x40);		/* Plain bad... */
-
-	/*
-	 * XXX: We could patch up st->stevedore and st->priv here
-	 * XXX: but if things go right, we will never need them.
-	 */
-	return (0);
-}
-
-/*---------------------------------------------------------------------
- * objcore methods for persistent objects
- */
-
-static struct object *
-smp_oc_getobj(struct worker *wrk, struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	struct storage *st;
-	uint64_t l;
-	int bad;
-
-	/* Some calls are direct, but they should match anyway */
-	assert(oc->methods->getobj == smp_oc_getobj);
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	if (wrk == NULL)
-		AZ(oc->flags & OC_F_NEEDFIXUP);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	o = (void*)(sg->sc->base + so->ptr);
-	/*
-	 * The object may not be in this segment since we allocate it
-	 * in a separate operation from the smp_object.  We could check
-	 * that it is in a later segment, but that would be complicated.
-	 * XXX: For now, be happy if it is inside the silo
-	 */
-	ASSERT_PTR_IN_SILO(sg->sc, o);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-
-	/*
-	 * If this flag is not set, it never will be, so the lock is not
-	 * needed to test it.
-	 */
-	if (!(oc->flags & OC_F_NEEDFIXUP))
-		return (o);
-
-	AN(wrk);
-	Lck_Lock(&sg->sc->mtx);
-	/* Check again, we might have raced. */
-	if (oc->flags & OC_F_NEEDFIXUP) {
-		/* We trust caller to have a refcnt for us */
-		o->objcore = oc;
-
-		bad = 0;
-		l = 0;
-		VTAILQ_FOREACH(st, &o->store, list) {
-			bad |= smp_loaded_st(sg->sc, sg, st);
-			if (bad)
-				break;
-			l += st->len;
-		}
-		if (l != o->len)
-			bad |= 0x100;
-
-		if(bad) {
-			o->ttl = 0;
-			o->grace = 0;
-			so->ttl = 0;
-		}
-
-		sg->nfixed++;
-		wrk->stats.n_object++;
-		wrk->stats.n_vampireobject--;
-		oc->flags &= ~OC_F_NEEDFIXUP;
-	}
-	Lck_Unlock(&sg->sc->mtx);
-	return (o);
-}
-
-static void
-smp_oc_updatemeta(struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	double mttl;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	o = smp_oc_getobj(NULL, oc);
-	AN(o);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	if (isnan(o->grace))
-		mttl = o->ttl;
-	else
-		mttl = - (o->ttl + o->grace);
-
-	if (sg == sg->sc->cur_seg) {
-		/* Lock necessary, we might race close_seg */
-		Lck_Lock(&sg->sc->mtx);
-		so->ban = o->ban_t;
-		so->ttl = mttl;
-		Lck_Unlock(&sg->sc->mtx);
-	} else {
-		so->ban = o->ban_t;
-		so->ttl = mttl;
-	}
-}
-
-static void __match_proto__()
-smp_oc_freeobj(struct objcore *oc)
-{
-	struct smp_seg *sg;
-	struct smp_object *so;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc);
-
-	Lck_Lock(&sg->sc->mtx);
-	so->ttl = 0;
-	so->ptr = 0;
-
-	assert(sg->nobj > 0);
-	assert(sg->nfixed > 0);
-	sg->nobj--;
-	sg->nfixed--;
-
-	Lck_Unlock(&sg->sc->mtx);
-}
-
-static struct objcore_methods smp_oc_methods = {
-	.getobj =		smp_oc_getobj,
-	.updatemeta =		smp_oc_updatemeta,
-	.freeobj =		smp_oc_freeobj,
-};
-
-/*--------------------------------------------------------------------
- * Add a new ban to all silos
- */
-
-static void
-smp_appendban(struct smp_sc *sc, struct smp_signctx *ctx, double t0,
-    uint32_t flags, uint32_t len, const char *ban)
-{
-	uint8_t *ptr, *ptr2;
-
-	(void)sc;
-	ptr = ptr2 = SIGN_END(ctx);
-
-	memcpy(ptr, "BAN", 4);
-	ptr += 4;
-
-	memcpy(ptr, &t0, sizeof t0);
-	ptr += sizeof t0;
-
-	memcpy(ptr, &flags, sizeof flags);
-	ptr += sizeof flags;
-
-	memcpy(ptr, &len, sizeof len);
-	ptr += sizeof len;
-
-	memcpy(ptr, ban, len);
-	ptr += len;
-
-	smp_append_sign(ctx, ptr2, ptr - ptr2);
-}
-
-void
-SMP_NewBan(double t0, const char *ban)
-{
-	struct smp_sc *sc;
-	uint32_t l = strlen(ban) + 1;
-
-	VTAILQ_FOREACH(sc, &silos, list) {
-		smp_appendban(sc, &sc->ban1, t0, 0, l, ban);
-		smp_appendban(sc, &sc->ban2, t0, 0, l, ban);
-	}
-}
-
-/*--------------------------------------------------------------------
- * Attempt to open and read in a ban list
- */
-
-static int
-smp_open_bans(struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	uint8_t *ptr, *pe;
-	double t0;
-	uint32_t flags, length;
-	int i, retval = 0;
-
-	ASSERT_CLI();
-	(void)sc;
-	i = smp_chk_sign(ctx);
-	if (i)
-		return (i);
-	ptr = SIGN_DATA(ctx);
-	pe = ptr + ctx->ss->length;
-
-	while (ptr < pe) {
-		if (memcmp(ptr, "BAN", 4)) {
-			retval = 1001;
-			break;
-		}
-		ptr += 4;
-
-		memcpy(&t0, ptr, sizeof t0);
-		ptr += sizeof t0;
-
-		memcpy(&flags, ptr, sizeof flags);
-		ptr += sizeof flags;
-		if (flags != 0) {
-			retval = 1002;
-			break;
-		}
-
-		memcpy(&length, ptr, sizeof length);
-		ptr += sizeof length;
-		if (ptr + length > pe) {
-			retval = 1003;
-			break;
-		}
-
-		if (ptr[length - 1] != '\0') {
-			retval = 1004;
-			break;
-		}
-
-		BAN_Reload(t0, flags, (const char *)ptr);
-
-		ptr += length;
-	}
-	assert(ptr <= pe);
-	return (retval);
-}
-
-
-/*--------------------------------------------------------------------*/
-
-static uint64_t
-smp_segend(const struct smp_seg *sg)
-{
-
-	return (sg->p.offset + sg->p.length);
-}
-
-static uint64_t
-smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
-{
-
-	IASSERTALIGN(sc, sc->next_bot);
-	assert(sc->next_bot <= sc->next_top - IRNUP(sc, SMP_SIGN_SPACE));
-	assert(sc->next_bot >= sg->p.offset);
-	assert(sc->next_top < sg->p.offset + sg->p.length);
-	return ((sc->next_top - sc->next_bot) - IRNUP(sc, SMP_SIGN_SPACE));
-}
-
 /*--------------------------------------------------------------------
  * Load segments
  *
@@ -456,8 +121,9 @@ smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
  * XXX: by the protection.
  */
 
-static void
-smp_load_seg(const struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
+void
+smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
+    struct smp_seg *sg)
 {
 	struct smp_object *so;
 	struct objcore *oc;
@@ -494,9 +160,7 @@ smp_load_seg(const struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
 		oc = sp->wrk->nobjcore;
 		oc->flags |= OC_F_NEEDFIXUP | OC_F_LRUDONTMOVE;
 		oc->flags &= ~OC_F_BUSY;
-		oc->priv = sg;
-		oc->priv2 = no;
-		oc->methods = &smp_oc_methods;
+		smp_init_oc(oc, sg, no);
 		oc->ban = BAN_RefBan(oc, so->ban, sc->tailban);
 		memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
 		(void)HSH_Insert(sp);
@@ -509,130 +173,10 @@ smp_load_seg(const struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
 }
 
 /*--------------------------------------------------------------------
- * Attempt to open and read in a segment list
- */
-
-static int
-smp_open_segs(struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	uint64_t length, l;
-	struct smp_segptr *ss, *se;
-	struct smp_seg *sg, *sg1, *sg2;
-	int i, n = 0;
-
-	ASSERT_CLI();
-	i = smp_chk_sign(ctx);
-	if (i)
-		return (i);
-
-	ss = SIGN_DATA(ctx);
-	length = ctx->ss->length;
-
-	if (length == 0) {
-		/* No segments */
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		return (0);
-	}
-	se = ss + length / sizeof *ss;
-	se--;
-	assert(ss <= se);
-
-	/*
-	 * Locate the free reserve; there are only two basic cases,
-	 * but once we start dropping segments, things get more complicated.
-	 */
-
-	sc->free_offset = se->offset + se->length;
-	l = sc->mediasize - sc->free_offset;
-	if (se->offset > ss->offset && l >= sc->free_reserve) {
-		/*
-		 * [__xxxxyyyyzzzz___]
-		 * Plenty of space at tail, do nothing.
-		 */
-	} else if (ss->offset > se->offset) {
-		/*
-		 * [zzzz____xxxxyyyy_]
-		 * (make) space between ends
-		 * We might nuke the entire tail end without getting
-		 * enough space, in which case we fall through to the
-		 * last check.
-		 */
-		while (ss < se && ss->offset > se->offset) {
-			l = ss->offset - (se->offset + se->length);
-			if (l > sc->free_reserve)
-				break;
-			ss++;
-			n++;
-		}
-	}
-
-	if (l < sc->free_reserve) {
-		/*
-		 * [__xxxxyyyyzzzz___]
-		 * (make) space at front
-		 */
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		while (ss < se) {
-			l = ss->offset - sc->free_offset;
-			if (l > sc->free_reserve)
-				break;
-			ss++;
-			n++;
-		}
-	}
-
-	assert (l >= sc->free_reserve);
-
-
-	sg1 = NULL;
-	sg2 = NULL;
-	for(; ss <= se; ss++) {
-		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
-		AN(sg);
-		sg->lru = LRU_Alloc();
-		CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-		sg->p = *ss;
-
-		sg->flags |= SMP_SEG_MUSTLOAD;
-
-		/*
-		 * HACK: prevent save_segs from nuking segment until we have
-		 * HACK: loaded it.
-		 */
-		sg->nobj = 1;
-		if (sg1 != NULL) {
-			assert(sg1->p.offset != sg->p.offset);
-			if (sg1->p.offset < sg->p.offset)
-				assert(smp_segend(sg1) <= sg->p.offset);
-			else
-				assert(smp_segend(sg) <= sg1->p.offset);
-		}
-		if (sg2 != NULL) {
-			assert(sg2->p.offset != sg->p.offset);
-			if (sg2->p.offset < sg->p.offset)
-				assert(smp_segend(sg2) <= sg->p.offset);
-			else
-				assert(smp_segend(sg) <= sg2->p.offset);
-		}
-
-		/* XXX: check that they are inside silo */
-		/* XXX: check that they don't overlap */
-		/* XXX: check that they are serial */
-		sg->sc = sc;
-		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
-		sg2 = sg;
-		if (sg1 == NULL)
-			sg1 = sg;
-	}
-	printf("Dropped %d segments to make free_reserve\n", n);
-	return (0);
-}
-
-/*--------------------------------------------------------------------
  * Create a new segment
  */
 
-static void
+void
 smp_new_seg(struct smp_sc *sc)
 {
 	struct smp_seg *sg, *sg2;
@@ -704,7 +248,7 @@ smp_new_seg(struct smp_sc *sc)
  * Close a segment
  */
 
-static void
+void
 smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
 {
 	uint64_t left, dst, len;
@@ -764,415 +308,214 @@ smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
 	sc->free_offset = smp_segend(sg);
 }
 
-/*--------------------------------------------------------------------
- * Silo worker thread
- */
-
-static void *
-smp_thread(struct sess *sp, void *priv)
-{
-	struct smp_sc	*sc;
-	struct smp_seg *sg;
-
-	(void)sp;
-	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
-
-	/* First, load all the objects from all segments */
-	VTAILQ_FOREACH(sg, &sc->segments, list)
-		if (sg->flags & SMP_SEG_MUSTLOAD)
-			smp_load_seg(sp, sc, sg);
-
-	sc->flags |= SMP_SC_LOADED;
-	BAN_Deref(&sc->tailban);
-	sc->tailban = NULL;
-	printf("Silo completely loaded\n");
-	while (1)
-		(void)sleep (1);
-	NEEDLESS_RETURN(NULL);
-}
-
-/*--------------------------------------------------------------------
- * Open a silo in the worker process
- */
-
-static void
-smp_open(const struct stevedore *st)
-{
-	struct smp_sc	*sc;
-
-	ASSERT_CLI();
 
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-
-	Lck_New(&sc->mtx, lck_smp);
-	Lck_Lock(&sc->mtx);
-
-	sc->stevedore = st;
-
-	/* We trust the parent to give us a valid silo, for good measure: */
-	AZ(smp_valid_silo(sc));
-
-	AZ(mprotect(sc->base, 4096, PROT_READ));
-
-	sc->ident = SIGN_DATA(&sc->idn);
-
-	/* We attempt ban1 first, and if that fails, try ban2 */
-	if (smp_open_bans(sc, &sc->ban1))
-		AZ(smp_open_bans(sc, &sc->ban2));
-
-	/* We attempt seg1 first, and if that fails, try seg2 */
-	if (smp_open_segs(sc, &sc->seg1))
-		AZ(smp_open_segs(sc, &sc->seg2));
-
-	sc->tailban = BAN_TailRef();
-	AN(sc->tailban);
-
-	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */
-
-	/* XXX: abandon early segments to make sure we have free space ? */
-
-	/* Open a new segment, so we are ready to write */
-	smp_new_seg(sc);
-
-	/* Start the silo worker thread, it will load the objects */
-	WRK_BgThread(&sc->thread, "persistence", smp_thread, sc);
-
-	VTAILQ_INSERT_TAIL(&silos, sc, list);
-	Lck_Unlock(&sc->mtx);
-}
-
-/*--------------------------------------------------------------------
- * Close a silo
+/*---------------------------------------------------------------------
  */
 
-static void
-smp_close(const struct stevedore *st)
+static struct smp_object *
+smp_find_so(const struct smp_seg *sg, const struct objcore *oc)
 {
-	struct smp_sc	*sc;
-
-	ASSERT_CLI();
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-	Lck_Lock(&sc->mtx);
-	smp_close_seg(sc, sc->cur_seg);
-	Lck_Unlock(&sc->mtx);
+	struct smp_object *so;
+	unsigned smp_idx;
 
-	/* XXX: reap thread */
+	smp_idx = oc->priv2;
+	assert(smp_idx > 0);
+	assert(smp_idx <= sg->p.lobjlist);
+	so = &sg->objs[sg->p.lobjlist - smp_idx];
+	return (so);
 }
 
-/*--------------------------------------------------------------------
- * Allocate a bite.
- *
- * Allocate [min_size...max_size] space from the bottom of the segment,
- * as is convenient.
- *
- * If 'so' + 'idx' is given, also allocate a smp_object from the top
- * of the segment.
- *
- * Return the segment in 'ssg' if given.
+/*---------------------------------------------------------------------
+ * Check if a given storage structure is valid to use
  */
 
-static struct storage *
-smp_allocx(struct stevedore *st, size_t min_size, size_t max_size,
-    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
+static int
+smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
+    const struct storage *st)
 {
-	struct smp_sc *sc;
-	struct storage *ss;
-	struct smp_seg *sg;
-	unsigned tries;
-	uint64_t left, extra;
+	struct smp_seg *sg2;
+	const uint8_t *pst;
+	uint64_t o;
 
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-	assert(min_size <= max_size);
+	(void)sg;		/* XXX: faster: Start search from here */
+	pst = (const void *)st;
 
-	max_size = IRNUP(sc, max_size);
-	min_size = IRNUP(sc, min_size);
+	if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
+		return (0x01);		/* Before silo payload start */
+	if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
+		return (0x02);		/* After silo end */
 
-	extra = IRNUP(sc, sizeof(*ss));
-	if (so != NULL) {
-		extra += sizeof(**so);
-		AN(idx);
-	}
+	o = pst - sc->base;
 
-	Lck_Lock(&sc->mtx);
-	sg = NULL;
-	ss = NULL;
-	for (tries = 0; tries < 3; tries++) {
-		left = smp_spaceleft(sc, sc->cur_seg);
-		if (left >= extra + min_size)
+	/* Find which segment contains the storage structure */
+	VTAILQ_FOREACH(sg2, &sc->segments, list)
+		if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
 			break;
-		smp_close_seg(sc, sc->cur_seg);
-		smp_new_seg(sc);
-	}
-	if (left >= extra + min_size)  {
-		if (left < extra + max_size)
-			max_size = IRNDN(sc, left - extra);
-
-		sg = sc->cur_seg;
-		ss = (void*)(sc->base + sc->next_bot);
-		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
-		sg->nalloc++;
-		if (so != NULL) {
-			sc->next_top -= sizeof(**so);
-			*so = (void*)(sc->base + sc->next_top);
-			/* Render this smp_object mostly harmless */
-			(*so)->ttl = 0.;
-			(*so)->ban = 0.;
-			(*so)->ptr = 0;
-			sg->objs = *so;
-			*idx = ++sg->p.lobjlist;
-		}
-		(void)smp_spaceleft(sc, sg);	/* for the assert */
-	}
-	Lck_Unlock(&sc->mtx);
+	if (sg2 == NULL)
+		return (0x04);		/* No claiming segment */
+	if (!(sg2->flags & SMP_SEG_LOADED))
+		return (0x08);		/* Claiming segment not loaded */
 
-	if (ss == NULL)
-		return (ss);
-	AN(sg);
-	assert(max_size >= min_size);
-
-	/* Fill the storage structure */
-	memset(ss, 0, sizeof *ss);
-	ss->magic = STORAGE_MAGIC;
-	ss->ptr = PRNUP(sc, ss + 1);
-	ss->space = max_size;
-	ss->priv = sc;
-	ss->stevedore = st;
-	ss->fd = sc->fd;
-	if (ssg != NULL)
-		*ssg = sg;
-	return (ss);
-}
+	/* It is now safe to access the storage structure */
+	if (st->magic != STORAGE_MAGIC)
+		return (0x10);		/* Not enough magic */
 
-/*--------------------------------------------------------------------
- * Find the per-segment lru list for this object
- */
+	if (o + st->space >= sg2->p.objlist)
+		return (0x20);		/* Allocation not inside segment */
 
-static struct lru *
-smp_getlru(const struct object *o)
-{
-	struct smp_seg *sg;
+	if (st->len > st->space)
+		return (0x40);		/* Plain bad... */
 
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-	CAST_OBJ_NOTNULL(sg, o->objcore->priv, SMP_SEG_MAGIC);
-	return (sg->lru);
+	/*
+	 * XXX: We could patch up st->stevedore and st->priv here
+	 * XXX: but if things go right, we will never need them.
+	 */
+	return (0);
 }
 
-/*--------------------------------------------------------------------
- * Allocate an object
+/*---------------------------------------------------------------------
+ * objcore methods for persistent objects
  */
 
 static struct object *
-smp_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
-    const struct stv_objsecrets *soc)
+smp_oc_getobj(struct worker *wrk, struct objcore *oc)
 {
 	struct object *o;
-	struct storage *st;
-	struct smp_sc	*sc;
 	struct smp_seg *sg;
 	struct smp_object *so;
-	struct objcore *oc;
-	unsigned objidx;
-
-	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
-	AN(sp->objcore);
-	AN(sp->wrk->ttl >= 0);
+	struct storage *st;
+	uint64_t l;
+	int bad;
 
-	ltot = IRNUP(sc, ltot);
+	/* Some calls are direct, but they should match anyway */
+	assert(oc->methods->getobj == smp_oc_getobj);
 
-	st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
-	if (st == NULL)
-		return (NULL);
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	if (wrk == NULL)
+		AZ(oc->flags & OC_F_NEEDFIXUP);
 
-	assert(st->space >= ltot);
-	ltot = st->len = st->space;
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	so = smp_find_so(sg, oc);
 
-	o = STV_MkObject(sp, st->ptr, ltot, soc);
+	o = (void*)(sg->sc->base + so->ptr);
+	/*
+	 * The object may not be in this segment since we allocate it
+	 * in a separate operation from the smp_object.  We could check
+	 * that it is in a later segment, but that would be complicated.
+	 * XXX: For now, be happy if it is inside the silo
+	 */
+	ASSERT_PTR_IN_SILO(sg->sc, o);
 	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-	o->objstore = st;
 
-	oc = o->objcore;
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	oc->flags |= OC_F_LRUDONTMOVE;
+	/*
+	 * If this flag is not set, it never will be, so the lock is not
+	 * needed to test it.
+	 */
+	if (!(oc->flags & OC_F_NEEDFIXUP))
+		return (o);
 
-	Lck_Lock(&sc->mtx);
-	sg->nfixed++;
-	sg->nobj++;
+	AN(wrk);
+	Lck_Lock(&sg->sc->mtx);
+	/* Check again, we might have raced. */
+	if (oc->flags & OC_F_NEEDFIXUP) {
+		/* We trust caller to have a refcnt for us */
+		o->objcore = oc;
 
-	/* We have to do this somewhere, might as well be here... */
-	assert(sizeof so->hash == DIGEST_LEN);
-	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
-	so->ttl = o->ttl;	/* XXX: grace? */
-	so->ptr = (uint8_t*)o - sc->base;
-	so->ban = o->ban_t;
+		bad = 0;
+		l = 0;
+		VTAILQ_FOREACH(st, &o->store, list) {
+			bad |= smp_loaded_st(sg->sc, sg, st);
+			if (bad)
+				break;
+			l += st->len;
+		}
+		if (l != o->len)
+			bad |= 0x100;
 
-	oc->priv = sg;
-	oc->priv2 = objidx;
-	oc->methods = &smp_oc_methods;
+		if(bad) {
+			o->ttl = 0;
+			o->grace = 0;
+			so->ttl = 0;
+		}
 
-	Lck_Unlock(&sc->mtx);
+		sg->nfixed++;
+		wrk->stats.n_object++;
+		wrk->stats.n_vampireobject--;
+		oc->flags &= ~OC_F_NEEDFIXUP;
+	}
+	Lck_Unlock(&sg->sc->mtx);
 	return (o);
 }
 
-/*--------------------------------------------------------------------
- * Allocate a bite
- */
-
-static struct storage *
-smp_alloc(struct stevedore *st, size_t size)
-{
-
-	return (smp_allocx(st,
-	    size > 4096 ? 4096 : size, size, NULL, NULL, NULL));
-}
-
-/*--------------------------------------------------------------------
- * Trim a bite
- * XXX: We could trim the last allocation.
- */
-
 static void
-smp_trim(struct storage *ss, size_t size)
-{
-
-	(void)ss;
-	(void)size;
-}
-
-/*--------------------------------------------------------------------
- * We don't track frees of storage; we track the objects which own the
- * storage, and when there are no more objects in the first segment,
- * it can be reclaimed.
- * XXX: We could free the last allocation, but does that happen ?
- */
-
-static void __match_proto__(storage_free_f)
-smp_free(struct storage *st)
+smp_oc_updatemeta(struct objcore *oc)
 {
+	struct object *o;
+	struct smp_seg *sg;
+	struct smp_object *so;
+	double mttl;
 
-	/* XXX */
-	(void)st;
-}
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	o = smp_oc_getobj(NULL, oc);
+	AN(o);
 
-/*--------------------------------------------------------------------
- * Pause until all silos have loaded.
- */
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
+	so = smp_find_so(sg, oc);
 
-void
-SMP_Ready(void)
-{
-	struct smp_sc *sc;
+	if (isnan(o->grace))
+		mttl = o->ttl;
+	else
+		mttl = - (o->ttl + o->grace);
 
-	ASSERT_CLI();
-	do {
-		VTAILQ_FOREACH(sc, &silos, list)
-			if (!(sc->flags & SMP_SC_LOADED))
-				break;
-		if (sc != NULL)
-			(void)sleep(1);
-	} while (sc != NULL);
+	if (sg == sg->sc->cur_seg) {
+		/* Lock necessary, we might race close_seg */
+		Lck_Lock(&sg->sc->mtx);
+		so->ban = o->ban_t;
+		so->ttl = mttl;
+		Lck_Unlock(&sg->sc->mtx);
+	} else {
+		so->ban = o->ban_t;
+		so->ttl = mttl;
+	}
 }
 
-/*--------------------------------------------------------------------*/
-
-const struct stevedore smp_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"persistent",
-	.init	=	smp_mgt_init,
-	.open	=	smp_open,
-	.close	=	smp_close,
-	.alloc	=	smp_alloc,
-	.allocobj =	smp_allocobj,
-	.getlru	=	smp_getlru,
-	.free	=	smp_free,
-	.trim	=	smp_trim,
-};
-
-/*--------------------------------------------------------------------
- * Persistence is a bear to test unadulterated, so we cheat by adding
- * a cli command we can use to make it do tricks for us.
- */
-
-static void
-debug_report_silo(struct cli *cli, const struct smp_sc *sc, int objs)
+static void __match_proto__()
+smp_oc_freeobj(struct objcore *oc)
 {
 	struct smp_seg *sg;
-	struct objcore *oc;
+	struct smp_object *so;
 
-	cli_out(cli, "Silo: %s (%s)\n",
-	    sc->stevedore->ident, sc->filename);
-	VTAILQ_FOREACH(sg, &sc->segments, list) {
-		cli_out(cli, "  Seg: [0x%jx ... +0x%jx]\n",
-		   (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
-		if (sg == sc->cur_seg)
-			cli_out(cli,
-			   "    Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
-			   (uintmax_t)(sc->next_bot),
-			   (uintmax_t)(sc->next_top),
-			   (uintmax_t)(sc->next_top - sc->next_bot));
-		cli_out(cli, "    %u nobj, %u alloc, %u lobjlist, %u fixed\n",
-		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
-		if (objs) {
-			VLIST_FOREACH(oc, &sg->lru->lru_head, lru_list)
-				cli_out(cli, "      %s %p\n",
-				    oc == &sg->lru->senteniel ?
-				    "senteniel" : "OC: ", oc);
-		}
-	}
-}
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 
-static void
-debug_persistent(struct cli *cli, const char * const * av, void *priv)
-{
-	struct smp_sc *sc;
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	so = smp_find_so(sg, oc);
 
-	(void)priv;
+	Lck_Lock(&sg->sc->mtx);
+	so->ttl = 0;
+	so->ptr = 0;
 
-	if (av[2] == NULL) {
-		VTAILQ_FOREACH(sc, &silos, list)
-			debug_report_silo(cli, sc, 0);
-		return;
-	}
-	VTAILQ_FOREACH(sc, &silos, list)
-		if (!strcmp(av[2], sc->stevedore->ident))
-			break;
-	if (sc == NULL) {
-		cli_out(cli, "Silo <%s> not found\n", av[2]);
-		cli_result(cli, CLIS_PARAM);
-		return;
-	}
-	if (av[3] == NULL) {
-		debug_report_silo(cli, sc, 0);
-		return;
-	}
-	Lck_Lock(&sc->mtx);
-	if (!strcmp(av[3], "sync")) {
-		smp_close_seg(sc, sc->cur_seg);
-		smp_new_seg(sc);
-	} else if (!strcmp(av[3], "dump")) {
-		debug_report_silo(cli, sc, 1);
-	} else {
-		cli_out(cli, "Unknown operation\n");
-		cli_result(cli, CLIS_PARAM);
-	}
-	Lck_Unlock(&sc->mtx);
+	assert(sg->nobj > 0);
+	assert(sg->nfixed > 0);
+	sg->nobj--;
+	sg->nfixed--;
+
+	Lck_Unlock(&sg->sc->mtx);
 }
 
-static struct cli_proto debug_cmds[] = {
-        { "debug.persistent", "debug.persistent",
-                "Persistent debugging magic:\n"
-		"\tdebug.persistent [stevedore [cmd]]\n"
-		"With no cmd arg, a summary of the silo is returned.\n"
-		"Possible commands:\n"
-		"\tsync\tClose current segment, open a new one\n"
-		"\tdump\tinclude objcores in silo summary\n"
-		"",
-		0, 2, "d", debug_persistent },
-        { NULL }
+static struct objcore_methods smp_oc_methods = {
+	.getobj =		smp_oc_getobj,
+	.updatemeta =		smp_oc_updatemeta,
+	.freeobj =		smp_oc_freeobj,
 };
 
+/*--------------------------------------------------------------------*/
+
 void
-SMP_Init(void)
+smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
 {
-	CLI_AddFuncs(debug_cmds);
+
+	oc->priv = sg;
+	oc->priv2 = objidx;
+	oc->methods = &smp_oc_methods;
 }
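
smp_save_seg() and smp_save_segs(), which stay at the top of this file,
implement the crash-safety discipline the comment block describes: write
the first copy of the segment list, sync it, then write the second copy
and sync it, so that (provided the kernel doesn't lie) at least one copy
is always intact.  A hedged sketch of the same discipline against two
ordinary files (the real code signs and syncs regions of the mapped silo
instead):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Write one copy of the segment list and force it to stable
	 * storage before the caller may touch the other copy. */
	static int
	save_copy(const char *path, const char *segs)
	{
		int fd;
		ssize_t len = (ssize_t)strlen(segs);

		fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (fd < 0)
			return (-1);
		if (write(fd, segs, (size_t)len) != len || fsync(fd) != 0) {
			(void)close(fd);
			return (-1);
		}
		return (close(fd));
	}

	int
	main(void)
	{
		const char *segs = "seg 0x1000+0x100000";

		/* Copy 1 is durable before copy 2 is rewritten, so a
		 * crash in either step leaves one valid copy behind. */
		if (save_copy("seglist.1", segs) != 0)
			return (1);
		if (save_copy("seglist.2", segs) != 0)
			return (1);
		printf("segment list saved twice\n");
		return (0);
	}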
diff --git a/bin/varnishd/storage_persistent_subr.c b/bin/varnishd/storage_persistent_subr.c
index bc6e1fd..62ca3bf 100644
--- a/bin/varnishd/storage_persistent_subr.c
+++ b/bin/varnishd/storage_persistent_subr.c
@@ -169,7 +169,7 @@ smp_sync_sign(const struct smp_signctx *ctx)
  * Create and force a new signature to backing store
  */
 
-void
+static void
 smp_new_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
     uint64_t off, const char *id)
 {
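
For reference, the ban journal that stays behind in storage_persistent.c
(smp_appendban()/smp_open_bans(), visible above as removals from the
silo file) uses a simple record layout: four bytes "BAN\0", a double
timestamp, a uint32_t flags word, a uint32_t length, then the
NUL-terminated ban string.  A hedged sketch of just that wire format
(the record values here are made up):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Append one ban record to buf, returning the new write offset.
	 * Layout mirrors smp_appendban(): magic, t0, flags, len, string. */
	static size_t
	ban_append(uint8_t *buf, size_t off, double t0, uint32_t flags,
	    const char *ban)
	{
		uint32_t len = (uint32_t)strlen(ban) + 1;	/* incl. NUL */

		memcpy(buf + off, "BAN", 4);
		off += 4;
		memcpy(buf + off, &t0, sizeof t0);
		off += sizeof t0;
		memcpy(buf + off, &flags, sizeof flags);
		off += sizeof flags;
		memcpy(buf + off, &len, sizeof len);
		off += sizeof len;
		memcpy(buf + off, ban, len);
		off += len;
		return (off);
	}

	int
	main(void)
	{
		uint8_t buf[256];
		size_t end, off = 0;
		double t0;
		uint32_t flags, len;

		end = ban_append(buf, 0, 1297162241.0, 0, "req.url ~ /tmp");

		/* Decode, with the sanity checks smp_open_bans() makes. */
		assert(memcmp(buf + off, "BAN", 4) == 0);
		off += 4;
		memcpy(&t0, buf + off, sizeof t0);
		off += sizeof t0;
		memcpy(&flags, buf + off, sizeof flags);
		off += sizeof flags;
		assert(flags == 0);
		memcpy(&len, buf + off, sizeof len);
		off += sizeof len;
		assert(off + len <= end);
		assert(buf[off + len - 1] == '\0');
		printf("t0=%.0f ban=\"%s\"\n", t0,
		    (const char *)(buf + off));
		return (0);
	}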



More information about the varnish-commit mailing list