[master] fe78027 Move everything related to the storage-expansion-API into a subdirectory where it will be protected from VMODs.

Poul-Henning Kamp phk at varnish-cache.org
Wed Oct 12 17:44:44 CEST 2011


commit fe780278743797013f6f26f0701b478a77d6b32a
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Wed Oct 12 15:44:14 2011 +0000

    Move everything related to the storage-expansion-API into a subdirectory
    where it will be protected from VMODs.

diff --git a/bin/varnishd/Makefile.am b/bin/varnishd/Makefile.am
index 2b822fa..5260427 100644
--- a/bin/varnishd/Makefile.am
+++ b/bin/varnishd/Makefile.am
@@ -64,16 +64,16 @@ varnishd_SOURCES = \
 	mgt_shmem.c \
 	mgt_vcc.c \
 	rfc2616.c \
-	stevedore.c \
-	stevedore_utils.c \
-	storage_file.c \
-	storage_malloc.c \
-	storage_persistent.c \
-	storage_persistent_mgt.c \
-	storage_persistent_silo.c \
-	storage_persistent_subr.c \
-	storage_synth.c \
-	storage_umem.c \
+	storage/stevedore.c \
+	storage/stevedore_utils.c \
+	storage/storage_file.c \
+	storage/storage_malloc.c \
+	storage/storage_persistent.c \
+	storage/storage_persistent_mgt.c \
+	storage/storage_persistent_silo.c \
+	storage/storage_persistent_subr.c \
+	storage/storage_synth.c \
+	storage/storage_umem.c \
 	varnishd.c \
 	vsm.c
 
@@ -88,8 +88,8 @@ noinst_HEADERS = \
 	heritage.h \
 	mgt.h \
 	mgt_cli.h \
-	storage.h \
-	storage_persistent.h \
+	storage/storage.h \
+	storage/storage_persistent.h \
 	vparam.h
 
 varnishd_CFLAGS = \
diff --git a/bin/varnishd/cache.h b/bin/varnishd/cache.h
index 7eb3258..33ccc8b 100644
--- a/bin/varnishd/cache.h
+++ b/bin/varnishd/cache.h
@@ -98,7 +98,6 @@ struct busyobj;
 struct cli;
 struct cli_proto;
 struct director;
-struct exp;
 struct iovec;
 struct objcore;
 struct object;
@@ -106,7 +105,6 @@ struct objhead;
 struct pool;
 struct sess;
 struct sesspool;
-struct storage;
 struct vbc;
 struct vef_priv;
 struct vrt_backend;
diff --git a/bin/varnishd/flint.sh b/bin/varnishd/flint.sh
index 690e445..86d4ad0 100755
--- a/bin/varnishd/flint.sh
+++ b/bin/varnishd/flint.sh
@@ -16,6 +16,7 @@ flexelint \
 	-I/usr/local/include \
 	-DVARNISH_STATE_DIR=\"foo\" \
 	*.c \
+	storage/*.c \
 	../../lib/libvarnish/*.c \
 	../../lib/libvarnishcompat/execinfo.c \
 	../../lib/libvcl/*.c \
diff --git a/bin/varnishd/stevedore.c b/bin/varnishd/stevedore.c
deleted file mode 100644
index 6deffb4..0000000
--- a/bin/varnishd/stevedore.c
+++ /dev/null
@@ -1,602 +0,0 @@
-/*-
- * Copyright (c) 2007-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Dag-Erling Smørgav <des at des.no>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * STEVEDORE: one who works at or is responsible for loading and
- * unloading ships in port.  Example: "on the wharves, stevedores were
- * unloading cargo from the far corners of the world." Origin: Spanish
- * estibador, from estibar to pack.  First Known Use: 1788
- */
-
-#include "config.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-
-#include "storage.h"
-#include "vav.h"
-#include "vcli_priv.h"
-#include "vrt.h"
-#include "vrt_obj.h"
-
-static VTAILQ_HEAD(, stevedore)	stevedores =
-    VTAILQ_HEAD_INITIALIZER(stevedores);
-
-static const struct stevedore * volatile stv_next;
-
-static struct stevedore *stv_transient;
-
-/*---------------------------------------------------------------------
- * Default objcore methods
- */
-
-static struct object * __match_proto__(getobj_f)
-default_oc_getobj(struct worker *wrk, struct objcore *oc)
-{
-	struct object *o;
-
-	(void)wrk;
-	if (oc->priv == NULL)
-		return (NULL);
-	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
-	return (o);
-}
-
-static void
-default_oc_freeobj(struct objcore *oc)
-{
-	struct object *o;
-
-	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
-	oc->priv = NULL;
-	oc->methods = NULL;
-
-	STV_Freestore(o);
-	STV_free(o->objstore);
-}
-
-static struct lru *
-default_oc_getlru(const struct objcore *oc)
-{
-	struct object *o;
-
-	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
-	return (o->objstore->stevedore->lru);
-}
-
-static struct objcore_methods default_oc_methods = {
-	.getobj = default_oc_getobj,
-	.freeobj = default_oc_freeobj,
-	.getlru = default_oc_getlru,
-};
-
-
-/*--------------------------------------------------------------------
- */
-
-struct lru *
-LRU_Alloc(void)
-{
-	struct lru *l;
-
-	ALLOC_OBJ(l, LRU_MAGIC);
-	AN(l);
-	VTAILQ_INIT(&l->lru_head);
-	Lck_New(&l->mtx, lck_lru);
-	return (l);
-}
-
-void
-LRU_Free(struct lru *lru)
-{
-	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
-	Lck_Delete(&lru->mtx);
-	FREE_OBJ(lru);
-}
-
-/*--------------------------------------------------------------------
- * XXX: trust pointer writes to be atomic
- */
-
-static struct stevedore *
-stv_pick_stevedore(const struct sess *sp, const char **hint)
-{
-	struct stevedore *stv;
-
-	AN(hint);
-	if (*hint != NULL && **hint != '\0') {
-		VTAILQ_FOREACH(stv, &stevedores, list) {
-			if (!strcmp(stv->ident, *hint))
-				return (stv);
-		}
-		if (!strcmp(TRANSIENT_STORAGE, *hint))
-			return (stv_transient);
-
-		/* Hint was not valid, nuke it */
-		WSP(sp, SLT_Debug, "Storage hint not usable");
-		*hint = NULL;
-	}
-	/* pick a stevedore and bump the head along */
-	stv = VTAILQ_NEXT(stv_next, list);
-	if (stv == NULL)
-		stv = VTAILQ_FIRST(&stevedores);
-	AN(stv);
-	AN(stv->name);
-	stv_next = stv;
-	return (stv);
-}
-
-/*-------------------------------------------------------------------*/
-
-static struct storage *
-stv_alloc(const struct sess *sp, size_t size)
-{
-	struct storage *st;
-	struct stevedore *stv;
-	unsigned fail = 0;
-
-	/*
-	 * Always use the stevedore which allocated the object in order to
-	 * keep an object inside the same stevedore.
-	 */
-	CHECK_OBJ_NOTNULL(sp->obj, OBJECT_MAGIC);
-	stv = sp->obj->objstore->stevedore;
-	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
-
-	if (size > (size_t)(params->fetch_maxchunksize) << 10)
-		size = (size_t)(params->fetch_maxchunksize) << 10;
-
-	for (;;) {
-		/* try to allocate from it */
-		AN(stv->alloc);
-		st = stv->alloc(stv, size);
-		if (st != NULL)
-			break;
-
-		if (size > params->fetch_chunksize * 1024LL) {
-			size >>= 1;
-			continue;
-		}
-
-		/* no luck; try to free some space and keep trying */
-		if (EXP_NukeOne(sp, stv->lru) == -1)
-			break;
-
-		/* Enough is enough: try another if we have one */
-		if (++fail >= params->nuke_limit)
-			break;
-	}
-	if (st != NULL)
-		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
-	return (st);
-}
-
-
-/*-------------------------------------------------------------------*
- * Structure used to transport internal knowledge from STV_NewObject()
- * to STV_MkObject().  Nobody else should mess with this struct.
- */
-
-struct stv_objsecrets {
-	unsigned	magic;
-#define STV_OBJ_SECRETES_MAGIC	0x78c87247
-	uint16_t	nhttp;
-	unsigned	lhttp;
-	unsigned	wsl;
-	struct exp	*exp;
-};
-
-/*--------------------------------------------------------------------
- * This function is called by stevedores ->allocobj() method, which
- * very often will be stv_default_allocobj() below, to convert a slab
- * of storage into object which the stevedore can then register in its
- * internal state, before returning it to STV_NewObject().
- * As you probably guessed: All this for persistence.
- */
-
-struct object *
-STV_MkObject(struct sess *sp, void *ptr, unsigned ltot,
-    const struct stv_objsecrets *soc)
-{
-	struct object *o;
-	unsigned l;
-
-	CHECK_OBJ_NOTNULL(soc, STV_OBJ_SECRETES_MAGIC);
-
-	assert(PAOK(ptr));
-	assert(PAOK(soc->wsl));
-	assert(PAOK(soc->lhttp));
-
-	assert(ltot >= sizeof *o + soc->lhttp + soc->wsl);
-
-	o = ptr;
-	memset(o, 0, sizeof *o);
-	o->magic = OBJECT_MAGIC;
-
-	l = PRNDDN(ltot - (sizeof *o + soc->lhttp));
-	assert(l >= soc->wsl);
-
-	o->http = HTTP_create(o + 1, soc->nhttp);
-	WS_Init(o->ws_o, "obj", (char *)(o + 1) + soc->lhttp, soc->wsl);
-	WS_Assert(o->ws_o);
-	assert(o->ws_o->e <= (char*)ptr + ltot);
-
-	http_Setup(o->http, o->ws_o);
-	o->http->magic = HTTP_MAGIC;
-	o->exp = *soc->exp;
-	VTAILQ_INIT(&o->store);
-	sp->wrk->stats.n_object++;
-
-	if (sp->objcore != NULL) {
-		CHECK_OBJ_NOTNULL(sp->objcore, OBJCORE_MAGIC);
-
-		o->objcore = sp->objcore;
-		sp->objcore = NULL;     /* refcnt follows pointer. */
-		BAN_NewObjCore(o->objcore);
-
-		o->objcore->methods = &default_oc_methods;
-		o->objcore->priv = o;
-	}
-	return (o);
-}
-
-/*--------------------------------------------------------------------
- * This is the default ->allocobj() which all stevedores who do not
- * implement persistent storage can rely on.
- */
-
-static struct object *
-stv_default_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
-    const struct stv_objsecrets *soc)
-{
-	struct object *o;
-	struct storage *st;
-
-	CHECK_OBJ_NOTNULL(soc, STV_OBJ_SECRETES_MAGIC);
-	st = stv->alloc(stv, ltot);
-	if (st == NULL)
-		return (NULL);
-	if (st->space < ltot) {
-		stv->free(st);
-		return (NULL);
-	}
-	ltot = st->len = st->space;
-	o = STV_MkObject(sp, st->ptr, ltot, soc);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-	o->objstore = st;
-	return (o);
-}
-
-/*-------------------------------------------------------------------
- * Allocate storage for an object, based on the header information.
- * XXX: If we know (a hint of) the length, we could allocate space
- * XXX: for the body in the same allocation while we are at it.
- */
-
-struct object *
-STV_NewObject(struct sess *sp, const char *hint, unsigned wsl, struct exp *ep,
-    uint16_t nhttp)
-{
-	struct object *o;
-	struct stevedore *stv, *stv0;
-	unsigned lhttp, ltot;
-	struct stv_objsecrets soc;
-	int i;
-
-	assert(wsl > 0);
-	wsl = PRNDUP(wsl);
-
-	lhttp = HTTP_estimate(nhttp);
-	lhttp = PRNDUP(lhttp);
-
-	memset(&soc, 0, sizeof soc);
-	soc.magic = STV_OBJ_SECRETES_MAGIC;
-	soc.nhttp = nhttp;
-	soc.lhttp = lhttp;
-	soc.wsl = wsl;
-	soc.exp = ep;
-
-	ltot = sizeof *o + wsl + lhttp;
-
-	stv = stv0 = stv_pick_stevedore(sp, &hint);
-	AN(stv->allocobj);
-	o = stv->allocobj(stv, sp, ltot, &soc);
-	if (o == NULL && hint == NULL) {
-		do {
-			stv = stv_pick_stevedore(sp, &hint);
-			AN(stv->allocobj);
-			o = stv->allocobj(stv, sp, ltot, &soc);
-		} while (o == NULL && stv != stv0);
-	}
-	if (o == NULL) {
-		/* no luck; try to free some space and keep trying */
-		for (i = 0; o == NULL && i < params->nuke_limit; i++) {
-			if (EXP_NukeOne(sp, stv->lru) == -1)
-				break;
-			o = stv->allocobj(stv, sp, ltot, &soc);
-		}
-	}
-
-	if (o == NULL)
-		return (NULL);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-	CHECK_OBJ_NOTNULL(o->objstore, STORAGE_MAGIC);
-	return (o);
-}
-
-/*-------------------------------------------------------------------*/
-
-void
-STV_Freestore(struct object *o)
-{
-	struct storage *st, *stn;
-
-	if (o->esidata != NULL) {
-		STV_free(o->esidata);
-		o->esidata = NULL;
-	}
-	VTAILQ_FOREACH_SAFE(st, &o->store, list, stn) {
-		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
-		VTAILQ_REMOVE(&o->store, st, list);
-		STV_free(st);
-	}
-}
-
-/*-------------------------------------------------------------------*/
-
-struct storage *
-STV_alloc(const struct sess *sp, size_t size)
-{
-
-	return (stv_alloc(sp, size));
-}
-
-void
-STV_trim(struct storage *st, size_t size)
-{
-
-	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
-	AN(st->stevedore);
-	if (st->stevedore->trim)
-		st->stevedore->trim(st, size);
-}
-
-void
-STV_free(struct storage *st)
-{
-
-	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
-	AN(st->stevedore);
-	AN(st->stevedore->free);
-	st->stevedore->free(st);
-}
-
-void
-STV_open(void)
-{
-	struct stevedore *stv;
-
-	VTAILQ_FOREACH(stv, &stevedores, list) {
-		stv->lru = LRU_Alloc();
-		if (stv->open != NULL)
-			stv->open(stv);
-	}
-	stv = stv_transient;
-	if (stv->open != NULL) {
-		stv->lru = LRU_Alloc();
-		stv->open(stv);
-	}
-}
-
-void
-STV_close(void)
-{
-	struct stevedore *stv;
-
-	VTAILQ_FOREACH(stv, &stevedores, list)
-		if (stv->close != NULL)
-			stv->close(stv);
-	stv = stv_transient;
-	if (stv->close != NULL)
-		stv->close(stv);
-}
-
-/*--------------------------------------------------------------------
- * Parse a stevedore argument on the form:
- *	[ name '=' ] strategy [ ',' arg ] *
- */
-
-static const struct choice STV_choice[] = {
-	{ "file",	&smf_stevedore },
-	{ "malloc",	&sma_stevedore },
-	{ "persistent",	&smp_stevedore },
-#ifdef HAVE_LIBUMEM
-	{ "umem",	&smu_stevedore },
-#endif
-	{ NULL,		NULL }
-};
-
-void
-STV_Config(const char *spec)
-{
-	char **av;
-	const char *p, *q;
-	struct stevedore *stv;
-	const struct stevedore *stv2;
-	int ac, l;
-	static unsigned seq = 0;
-
-	ASSERT_MGT();
-	p = strchr(spec, '=');
-	q = strchr(spec, ',');
-	if (p != NULL && (q == NULL || q > p)) {
-		av = VAV_Parse(p + 1, NULL, ARGV_COMMA);
-	} else {
-		av = VAV_Parse(spec, NULL, ARGV_COMMA);
-		p = NULL;
-	}
-	AN(av);
-
-	if (av[0] != NULL)
-		ARGV_ERR("%s\n", av[0]);
-
-	if (av[1] == NULL)
-		ARGV_ERR("-s argument lacks strategy {malloc, file, ...}\n");
-
-	for (ac = 0; av[ac + 2] != NULL; ac++)
-		continue;
-
-	stv2 = pick(STV_choice, av[1], "storage");
-	AN(stv2);
-
-	/* Append strategy to ident string */
-	VSB_printf(vident, ",-s%s", av[1]);
-
-	av += 2;
-
-	CHECK_OBJ_NOTNULL(stv2, STEVEDORE_MAGIC);
-	ALLOC_OBJ(stv, STEVEDORE_MAGIC);
-	AN(stv);
-
-	*stv = *stv2;
-	AN(stv->name);
-	AN(stv->alloc);
-	if (stv->allocobj == NULL)
-		stv->allocobj = stv_default_allocobj;
-
-	if (p == NULL)
-		bprintf(stv->ident, "s%u", seq++);
-	else {
-		l = p - spec;
-		if (l > sizeof stv->ident - 1)
-			l = sizeof stv->ident - 1;
-		bprintf(stv->ident, "%.*s", l, spec);
-	}
-
-	VTAILQ_FOREACH(stv2, &stevedores, list) {
-		if (strcmp(stv2->ident, stv->ident))
-			continue;
-		ARGV_ERR("(-s%s=%s) already defined once\n",
-		    stv->ident, stv->name);
-	}
-
-	if (stv->init != NULL)
-		stv->init(stv, ac, av);
-	else if (ac != 0)
-		ARGV_ERR("(-s%s) too many arguments\n", stv->name);
-
-	if (!strcmp(stv->ident, TRANSIENT_STORAGE)) {
-		stv->transient = 1;
-		AZ(stv_transient);
-		stv_transient = stv;
-	} else {
-		VTAILQ_INSERT_TAIL(&stevedores, stv, list);
-		if (!stv_next)
-			stv_next = VTAILQ_FIRST(&stevedores);
-	}
-}
-
-/*--------------------------------------------------------------------*/
-
-void
-STV_Config_Transient(void)
-{
-
-	ASSERT_MGT();
-
-	if (stv_transient == NULL)
-		STV_Config(TRANSIENT_STORAGE "=malloc");
-}
-
-/*--------------------------------------------------------------------*/
-
-static void
-stv_cli_list(struct cli *cli, const char * const *av, void *priv)
-{
-	struct stevedore *stv;
-
-	ASSERT_MGT();
-	(void)av;
-	(void)priv;
-	VCLI_Out(cli, "Storage devices:\n");
-	stv = stv_transient;
-		VCLI_Out(cli, "\tstorage.%s = %s\n", stv->ident, stv->name);
-	VTAILQ_FOREACH(stv, &stevedores, list)
-		VCLI_Out(cli, "\tstorage.%s = %s\n", stv->ident, stv->name);
-}
-
-/*--------------------------------------------------------------------*/
-
-struct cli_proto cli_stv[] = {
-	{ "storage.list", "storage.list", "List storage devices\n",
-	    0, 0, "", stv_cli_list },
-	{ NULL}
-};
-
-/*--------------------------------------------------------------------
- * VRT functions for stevedores
- */
-
-static const struct stevedore *
-stv_find(const char *nm)
-{
-	const struct stevedore *stv;
-
-	VTAILQ_FOREACH(stv, &stevedores, list)
-		if (!strcmp(stv->ident, nm))
-			return (stv);
-	if (!strcmp(TRANSIENT_STORAGE, nm))
-		return (stv_transient);
-	return (NULL);
-}
-
-int
-VRT_Stv(const char *nm)
-{
-
-	if (stv_find(nm) != NULL)
-		return (1);
-	return (0);
-}
-
-#define VRTSTVVAR(nm, vtype, ctype, dval)	\
-ctype						\
-VRT_Stv_##nm(const char *nm)			\
-{						\
-	const struct stevedore *stv;		\
-						\
-	stv = stv_find(nm);			\
-	if (stv == NULL)			\
-		return (dval);			\
-	if (stv->var_##nm == NULL)		\
-		return (dval);			\
-	return (stv->var_##nm(stv));		\
-}
-
-#include "tbl/vrt_stv_var.h"
-#undef VRTSTVVAR
diff --git a/bin/varnishd/stevedore_utils.c b/bin/varnishd/stevedore_utils.c
deleted file mode 100644
index e428f58..0000000
--- a/bin/varnishd/stevedore_utils.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2010 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Utility functions for stevedores and storage modules
- */
-
-#include "config.h"
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#ifdef HAVE_SYS_MOUNT_H
-#  include <sys/mount.h>
-#endif
-#ifdef HAVE_SYS_STATVFS_H
-#  include <sys/statvfs.h>
-#endif
-#ifdef HAVE_SYS_VFS_H
-#  include <sys/vfs.h>
-#endif
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "mgt.h"
-
-#include "storage.h"
-#include "vnum.h"
-
-#ifndef O_LARGEFILE
-#define O_LARGEFILE	0
-#endif
-
-/*--------------------------------------------------------------------
- * Get a storage file.
- *
- * The fn argument can be an existing file, an existing directory or
- * a nonexistent filename in an existing directory.
- *
- * If a directory is specified, the file will be anonymous (unlinked)
- *
- * Return:
- *	 0 if the file was preexisting.
- *	 1 if the file was created.
- *	 2 if the file is anonymous.
- *
- * Uses ARGV_ERR to exit in case of trouble.
- */
-
-int
-STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx)
-{
-	int fd;
-	struct stat st;
-	int retval = 1;
-	char buf[FILENAME_MAX];
-
-	AN(fn);
-	AN(fnp);
-	AN(fdp);
-	*fnp = NULL;
-	*fdp = -1;
-
-	/* try to create a new file of this name */
-	fd = open(fn, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE, 0600);
-	if (fd >= 0) {
-		*fdp = fd;
-		*fnp = fn;
-		return (retval);
-	}
-
-	if (stat(fn, &st))
-		ARGV_ERR(
-		    "(%s) \"%s\" does not exist and could not be created\n",
-		    ctx, fn);
-
-	if (S_ISDIR(st.st_mode)) {
-		bprintf(buf, "%s/varnish.XXXXXX", fn);
-		fd = mkstemp(buf);
-		if (fd < 0)
-			ARGV_ERR("(%s) \"%s\" mkstemp(%s) failed (%s)\n",
-			    ctx, fn, buf, strerror(errno));
-		AZ(unlink(buf));
-		*fnp = strdup(buf);
-		AN(*fnp);
-		retval = 2;
-	} else if (S_ISREG(st.st_mode)) {
-		fd = open(fn, O_RDWR | O_LARGEFILE);
-		if (fd < 0)
-			ARGV_ERR("(%s) \"%s\" could not open (%s)\n",
-			    ctx, fn, strerror(errno));
-		*fnp = fn;
-		retval = 0;
-	} else
-		ARGV_ERR(
-		    "(%s) \"%s\" is neither file nor directory\n", ctx, fn);
-
-	AZ(fstat(fd, &st));
-	if (!S_ISREG(st.st_mode))
-		ARGV_ERR("(%s) \"%s\" was not a file after opening\n",
-		    ctx, fn);
-
-	*fdp = fd;
-	return (retval);
-}
-
-/*--------------------------------------------------------------------
- * Figure out how much space is in a filesystem
- */
-
-static uintmax_t
-stv_fsspace(int fd, unsigned *bs)
-{
-	uintmax_t bsize, bavail;
-#if defined(HAVE_SYS_STATVFS_H)
-	struct statvfs fsst;
-
-	AZ(fstatvfs(fd, &fsst));
-	bsize = fsst.f_frsize;
-	bavail = fsst.f_bavail;
-#elif defined(HAVE_SYS_MOUNT_H) || defined(HAVE_SYS_VFS_H)
-	struct statfs fsst;
-
-	AZ(fstatfs(sc->fd, &fsst));
-	bsize = fsst.f_bsize;
-	bavail = fsst.f_bavail;
-#else
-#error no struct statfs / struct statvfs
-#endif
-
-	/* We use units of the larger of filesystem blocksize and pagesize */
-	if (*bs < bsize)
-		*bs = bsize;
-	xxxassert(*bs % bsize == 0);
-	return (bsize * bavail);
-}
-
-
-/*--------------------------------------------------------------------
- * Decide file size.
- *
- * If the sizespecification is empty and the file exists with non-zero
- * size, use that, otherwise, interpret the specification.
- *
- * Handle off_t sizes and pointer width limitations.
- */
-
-uintmax_t
-STV_FileSize(int fd, const char *size, unsigned *granularity, const char *ctx)
-{
-	uintmax_t l, fssize;
-	unsigned bs;
-	const char *q;
-	int i;
-	off_t o;
-	struct stat st;
-
-	AZ(fstat(fd, &st));
-	xxxassert(S_ISREG(st.st_mode));
-
-	bs = *granularity;
-	fssize = stv_fsspace(fd, &bs);
-	xxxassert(bs % *granularity == 0);
-
-	if ((size == NULL || *size == '\0') && st.st_size != 0) {
-		/*
-		 * We have no size specification, but an existing file,
-		 * use its existing size.
-		 */
-		l = st.st_size;
-	} else {
-		AN(size);
-		q = VNUM_2bytes(size, &l, fssize);
-
-		if (q != NULL)
-			ARGV_ERR("(%s) size \"%s\": %s\n", size, ctx, q);
-
-		if (l < 1024*1024)
-			ARGV_ERR("(-spersistent) size \"%s\": too small, "
-				 "did you forget to specify M or G?\n", size);
-	}
-
-	/*
-	 * This trickery wouldn't be necessary if X/Open would
-	 * just add OFF_MAX to <limits.h>...
-	 */
-	i = 0;
-	while(1) {
-		o = l;
-		if (o == l && o > 0)
-			break;
-		l >>= 1;
-		i++;
-	}
-	if (i)
-		fprintf(stderr, "WARNING: (%s) file size reduced"
-		    " to %ju due to system \"off_t\" limitations\n", ctx, l);
-	else if (l - st.st_size > fssize) {
-		l = fssize * 80 / 100;
-		fprintf(stderr, "WARNING: (%s) file size reduced"
-		    " to %ju (80%% of available disk space)\n", ctx, l);
-	}
-
-	if (sizeof(void *) == 4 && l > INT32_MAX) { /*lint !e506 !e774 !e845 */
-		fprintf(stderr,
-		    "NB: Storage size limited to 2GB on 32 bit architecture,\n"
-		    "NB: otherwise we could run out of address space.\n"
-		);
-		l = INT32_MAX;
-	}
-
-	/* round down to multiple of filesystem blocksize or pagesize */
-	l -= (l % bs);
-
-	*granularity = bs;
-	return(l);
-}
diff --git a/bin/varnishd/storage.h b/bin/varnishd/storage.h
deleted file mode 100644
index 80cad13..0000000
--- a/bin/varnishd/storage.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * This defines the backend interface between the stevedore and the
- * pluggable storage implementations.
- *
- */
-
-struct stv_objsecrets;
-struct stevedore;
-struct sess;
-struct lru;
-
-typedef void storage_init_f(struct stevedore *, int ac, char * const *av);
-typedef void storage_open_f(const struct stevedore *);
-typedef struct storage *storage_alloc_f(struct stevedore *, size_t size);
-typedef void storage_trim_f(struct storage *, size_t size);
-typedef void storage_free_f(struct storage *);
-typedef struct object *storage_allocobj_f(struct stevedore *, struct sess *sp,
-    unsigned ltot, const struct stv_objsecrets *);
-typedef void storage_close_f(const struct stevedore *);
-
-/* Prototypes for VCL variable responders */
-#define VRTSTVTYPE(ct) typedef ct storage_var_##ct(const struct stevedore *);
-#include "tbl/vrt_stv_var.h"
-#undef VRTSTVTYPE
-
-/*--------------------------------------------------------------------*/
-
-struct stevedore {
-	unsigned		magic;
-#define STEVEDORE_MAGIC		0x4baf43db
-	const char		*name;
-	unsigned		transient;
-	storage_init_f		*init;		/* called by mgt process */
-	storage_open_f		*open;		/* called by cache process */
-	storage_alloc_f		*alloc;		/* --//-- */
-	storage_trim_f		*trim;		/* --//-- */
-	storage_free_f		*free;		/* --//-- */
-	storage_close_f		*close;		/* --//-- */
-	storage_allocobj_f	*allocobj;	/* --//-- */
-
-	struct lru		*lru;
-
-#define VRTSTVVAR(nm, vtype, ctype, dval) storage_var_##ctype *var_##nm;
-#include "tbl/vrt_stv_var.h"
-#undef VRTSTVVAR
-
-	/* private fields */
-	void			*priv;
-
-	VTAILQ_ENTRY(stevedore)	list;
-	char			ident[16];	/* XXX: match VSM_chunk.ident */
-};
-
-/*--------------------------------------------------------------------*/
-int STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx);
-uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity,
-    const char *ctx);
-struct object *STV_MkObject(struct sess *sp, void *ptr, unsigned ltot,
-    const struct stv_objsecrets *soc);
-
-struct lru *LRU_Alloc(void);
-void LRU_Free(struct lru *lru);
-
-/*--------------------------------------------------------------------*/
-extern const struct stevedore sma_stevedore;
-extern const struct stevedore smf_stevedore;
-extern const struct stevedore smp_stevedore;
-#ifdef HAVE_LIBUMEM
-extern const struct stevedore smu_stevedore;
-#endif
diff --git a/bin/varnishd/storage/stevedore.c b/bin/varnishd/storage/stevedore.c
new file mode 100644
index 0000000..65f9f49
--- /dev/null
+++ b/bin/varnishd/storage/stevedore.c
@@ -0,0 +1,602 @@
+/*-
+ * Copyright (c) 2007-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Dag-Erling Smørgrav <des at des.no>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * STEVEDORE: one who works at or is responsible for loading and
+ * unloading ships in port.  Example: "on the wharves, stevedores were
+ * unloading cargo from the far corners of the world." Origin: Spanish
+ * estibador, from estibar to pack.  First Known Use: 1788
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+
+#include "storage/storage.h"
+#include "vav.h"
+#include "vcli_priv.h"
+#include "vrt.h"
+#include "vrt_obj.h"
+
+static VTAILQ_HEAD(, stevedore)	stevedores =
+    VTAILQ_HEAD_INITIALIZER(stevedores);
+
+static const struct stevedore * volatile stv_next;
+
+static struct stevedore *stv_transient;
+
+/*---------------------------------------------------------------------
+ * Default objcore methods
+ */
+
+static struct object * __match_proto__(getobj_f)
+default_oc_getobj(struct worker *wrk, struct objcore *oc)
+{
+	struct object *o;
+
+	(void)wrk;
+	if (oc->priv == NULL)
+		return (NULL);
+	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
+	return (o);
+}
+
+static void
+default_oc_freeobj(struct objcore *oc)
+{
+	struct object *o;
+
+	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
+	oc->priv = NULL;
+	oc->methods = NULL;
+
+	STV_Freestore(o);
+	STV_free(o->objstore);
+}
+
+static struct lru *
+default_oc_getlru(const struct objcore *oc)
+{
+	struct object *o;
+
+	CAST_OBJ_NOTNULL(o, oc->priv, OBJECT_MAGIC);
+	return (o->objstore->stevedore->lru);
+}
+
+static struct objcore_methods default_oc_methods = {
+	.getobj = default_oc_getobj,
+	.freeobj = default_oc_freeobj,
+	.getlru = default_oc_getlru,
+};
+
+
+/*--------------------------------------------------------------------
+ */
+
+struct lru *
+LRU_Alloc(void)
+{
+	struct lru *l;
+
+	ALLOC_OBJ(l, LRU_MAGIC);
+	AN(l);
+	VTAILQ_INIT(&l->lru_head);
+	Lck_New(&l->mtx, lck_lru);
+	return (l);
+}
+
+void
+LRU_Free(struct lru *lru)
+{
+	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
+	Lck_Delete(&lru->mtx);
+	FREE_OBJ(lru);
+}
+
+/*--------------------------------------------------------------------
+ * XXX: trust pointer writes to be atomic
+ */
+
+static struct stevedore *
+stv_pick_stevedore(const struct sess *sp, const char **hint)
+{
+	struct stevedore *stv;
+
+	AN(hint);
+	if (*hint != NULL && **hint != '\0') {
+		VTAILQ_FOREACH(stv, &stevedores, list) {
+			if (!strcmp(stv->ident, *hint))
+				return (stv);
+		}
+		if (!strcmp(TRANSIENT_STORAGE, *hint))
+			return (stv_transient);
+
+		/* Hint was not valid, nuke it */
+		WSP(sp, SLT_Debug, "Storage hint not usable");
+		*hint = NULL;
+	}
+	/* pick a stevedore and bump the head along */
+	stv = VTAILQ_NEXT(stv_next, list);
+	if (stv == NULL)
+		stv = VTAILQ_FIRST(&stevedores);
+	AN(stv);
+	AN(stv->name);
+	stv_next = stv;
+	return (stv);
+}
+
+/*-------------------------------------------------------------------*/
+
+static struct storage *
+stv_alloc(const struct sess *sp, size_t size)
+{
+	struct storage *st;
+	struct stevedore *stv;
+	unsigned fail = 0;
+
+	/*
+	 * Always use the stevedore which allocated the object in order to
+	 * keep an object inside the same stevedore.
+	 */
+	CHECK_OBJ_NOTNULL(sp->obj, OBJECT_MAGIC);
+	stv = sp->obj->objstore->stevedore;
+	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
+
+	if (size > (size_t)(params->fetch_maxchunksize) << 10)
+		size = (size_t)(params->fetch_maxchunksize) << 10;
+
+	for (;;) {
+		/* try to allocate from it */
+		AN(stv->alloc);
+		st = stv->alloc(stv, size);
+		if (st != NULL)
+			break;
+
+		if (size > params->fetch_chunksize * 1024LL) {
+			size >>= 1;
+			continue;
+		}
+
+		/* no luck; try to free some space and keep trying */
+		if (EXP_NukeOne(sp, stv->lru) == -1)
+			break;
+
+		/* Enough is enough: try another if we have one */
+		if (++fail >= params->nuke_limit)
+			break;
+	}
+	if (st != NULL)
+		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
+	return (st);
+}
+
+
+/*-------------------------------------------------------------------*
+ * Structure used to transport internal knowledge from STV_NewObject()
+ * to STV_MkObject().  Nobody else should mess with this struct.
+ */
+
+struct stv_objsecrets {
+	unsigned	magic;
+#define STV_OBJ_SECRETES_MAGIC	0x78c87247
+	uint16_t	nhttp;
+	unsigned	lhttp;
+	unsigned	wsl;
+	struct exp	*exp;
+};
+
+/*--------------------------------------------------------------------
+ * This function is called by stevedores ->allocobj() method, which
+ * very often will be stv_default_allocobj() below, to convert a slab
+ * of storage into object which the stevedore can then register in its
+ * internal state, before returning it to STV_NewObject().
+ * As you probably guessed: All this for persistence.
+ */
+
+struct object *
+STV_MkObject(struct sess *sp, void *ptr, unsigned ltot,
+    const struct stv_objsecrets *soc)
+{
+	struct object *o;
+	unsigned l;
+
+	CHECK_OBJ_NOTNULL(soc, STV_OBJ_SECRETES_MAGIC);
+
+	assert(PAOK(ptr));
+	assert(PAOK(soc->wsl));
+	assert(PAOK(soc->lhttp));
+
+	assert(ltot >= sizeof *o + soc->lhttp + soc->wsl);
+
+	o = ptr;
+	memset(o, 0, sizeof *o);
+	o->magic = OBJECT_MAGIC;
+
+	l = PRNDDN(ltot - (sizeof *o + soc->lhttp));
+	assert(l >= soc->wsl);
+
+	o->http = HTTP_create(o + 1, soc->nhttp);
+	WS_Init(o->ws_o, "obj", (char *)(o + 1) + soc->lhttp, soc->wsl);
+	WS_Assert(o->ws_o);
+	assert(o->ws_o->e <= (char*)ptr + ltot);
+
+	http_Setup(o->http, o->ws_o);
+	o->http->magic = HTTP_MAGIC;
+	o->exp = *soc->exp;
+	VTAILQ_INIT(&o->store);
+	sp->wrk->stats.n_object++;
+
+	if (sp->objcore != NULL) {
+		CHECK_OBJ_NOTNULL(sp->objcore, OBJCORE_MAGIC);
+
+		o->objcore = sp->objcore;
+		sp->objcore = NULL;     /* refcnt follows pointer. */
+		BAN_NewObjCore(o->objcore);
+
+		o->objcore->methods = &default_oc_methods;
+		o->objcore->priv = o;
+	}
+	return (o);
+}
+
+/*--------------------------------------------------------------------
+ * This is the default ->allocobj() which all stevedores who do not
+ * implement persistent storage can rely on.
+ */
+
+static struct object *
+stv_default_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
+    const struct stv_objsecrets *soc)
+{
+	struct object *o;
+	struct storage *st;
+
+	CHECK_OBJ_NOTNULL(soc, STV_OBJ_SECRETES_MAGIC);
+	st = stv->alloc(stv, ltot);
+	if (st == NULL)
+		return (NULL);
+	if (st->space < ltot) {
+		stv->free(st);
+		return (NULL);
+	}
+	ltot = st->len = st->space;
+	o = STV_MkObject(sp, st->ptr, ltot, soc);
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	o->objstore = st;
+	return (o);
+}
+
+/*-------------------------------------------------------------------
+ * Allocate storage for an object, based on the header information.
+ * XXX: If we know (a hint of) the length, we could allocate space
+ * XXX: for the body in the same allocation while we are at it.
+ */
+
+struct object *
+STV_NewObject(struct sess *sp, const char *hint, unsigned wsl, struct exp *ep,
+    uint16_t nhttp)
+{
+	struct object *o;
+	struct stevedore *stv, *stv0;
+	unsigned lhttp, ltot;
+	struct stv_objsecrets soc;
+	int i;
+
+	assert(wsl > 0);
+	wsl = PRNDUP(wsl);
+
+	lhttp = HTTP_estimate(nhttp);
+	lhttp = PRNDUP(lhttp);
+
+	memset(&soc, 0, sizeof soc);
+	soc.magic = STV_OBJ_SECRETES_MAGIC;
+	soc.nhttp = nhttp;
+	soc.lhttp = lhttp;
+	soc.wsl = wsl;
+	soc.exp = ep;
+
+	ltot = sizeof *o + wsl + lhttp;
+
+	stv = stv0 = stv_pick_stevedore(sp, &hint);
+	AN(stv->allocobj);
+	o = stv->allocobj(stv, sp, ltot, &soc);
+	if (o == NULL && hint == NULL) {
+		do {
+			stv = stv_pick_stevedore(sp, &hint);
+			AN(stv->allocobj);
+			o = stv->allocobj(stv, sp, ltot, &soc);
+		} while (o == NULL && stv != stv0);
+	}
+	if (o == NULL) {
+		/* no luck; try to free some space and keep trying */
+		for (i = 0; o == NULL && i < params->nuke_limit; i++) {
+			if (EXP_NukeOne(sp, stv->lru) == -1)
+				break;
+			o = stv->allocobj(stv, sp, ltot, &soc);
+		}
+	}
+
+	if (o == NULL)
+		return (NULL);
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	CHECK_OBJ_NOTNULL(o->objstore, STORAGE_MAGIC);
+	return (o);
+}
+
+/*-------------------------------------------------------------------*/
+
+void
+STV_Freestore(struct object *o)
+{
+	struct storage *st, *stn;
+
+	if (o->esidata != NULL) {
+		STV_free(o->esidata);
+		o->esidata = NULL;
+	}
+	VTAILQ_FOREACH_SAFE(st, &o->store, list, stn) {
+		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
+		VTAILQ_REMOVE(&o->store, st, list);
+		STV_free(st);
+	}
+}
+
+/*-------------------------------------------------------------------*/
+
+struct storage *
+STV_alloc(const struct sess *sp, size_t size)
+{
+
+	return (stv_alloc(sp, size));
+}
+
+void
+STV_trim(struct storage *st, size_t size)
+{
+
+	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
+	AN(st->stevedore);
+	if (st->stevedore->trim)
+		st->stevedore->trim(st, size);
+}
+
+void
+STV_free(struct storage *st)
+{
+
+	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
+	AN(st->stevedore);
+	AN(st->stevedore->free);
+	st->stevedore->free(st);
+}
+
+void
+STV_open(void)
+{
+	struct stevedore *stv;
+
+	VTAILQ_FOREACH(stv, &stevedores, list) {
+		stv->lru = LRU_Alloc();
+		if (stv->open != NULL)
+			stv->open(stv);
+	}
+	stv = stv_transient;
+	if (stv->open != NULL) {
+		stv->lru = LRU_Alloc();
+		stv->open(stv);
+	}
+}
+
+void
+STV_close(void)
+{
+	struct stevedore *stv;
+
+	VTAILQ_FOREACH(stv, &stevedores, list)
+		if (stv->close != NULL)
+			stv->close(stv);
+	stv = stv_transient;
+	if (stv->close != NULL)
+		stv->close(stv);
+}
+
+/*--------------------------------------------------------------------
+ * Parse a stevedore argument on the form:
+ *	[ name '=' ] strategy [ ',' arg ] *
+ */
+
+static const struct choice STV_choice[] = {
+	{ "file",	&smf_stevedore },
+	{ "malloc",	&sma_stevedore },
+	{ "persistent",	&smp_stevedore },
+#ifdef HAVE_LIBUMEM
+	{ "umem",	&smu_stevedore },
+#endif
+	{ NULL,		NULL }
+};
+
+void
+STV_Config(const char *spec)
+{
+	char **av;
+	const char *p, *q;
+	struct stevedore *stv;
+	const struct stevedore *stv2;
+	int ac, l;
+	static unsigned seq = 0;
+
+	ASSERT_MGT();
+	p = strchr(spec, '=');
+	q = strchr(spec, ',');
+	if (p != NULL && (q == NULL || q > p)) {
+		av = VAV_Parse(p + 1, NULL, ARGV_COMMA);
+	} else {
+		av = VAV_Parse(spec, NULL, ARGV_COMMA);
+		p = NULL;
+	}
+	AN(av);
+
+	if (av[0] != NULL)
+		ARGV_ERR("%s\n", av[0]);
+
+	if (av[1] == NULL)
+		ARGV_ERR("-s argument lacks strategy {malloc, file, ...}\n");
+
+	for (ac = 0; av[ac + 2] != NULL; ac++)
+		continue;
+
+	stv2 = pick(STV_choice, av[1], "storage");
+	AN(stv2);
+
+	/* Append strategy to ident string */
+	VSB_printf(vident, ",-s%s", av[1]);
+
+	av += 2;
+
+	CHECK_OBJ_NOTNULL(stv2, STEVEDORE_MAGIC);
+	ALLOC_OBJ(stv, STEVEDORE_MAGIC);
+	AN(stv);
+
+	*stv = *stv2;
+	AN(stv->name);
+	AN(stv->alloc);
+	if (stv->allocobj == NULL)
+		stv->allocobj = stv_default_allocobj;
+
+	if (p == NULL)
+		bprintf(stv->ident, "s%u", seq++);
+	else {
+		l = p - spec;
+		if (l > sizeof stv->ident - 1)
+			l = sizeof stv->ident - 1;
+		bprintf(stv->ident, "%.*s", l, spec);
+	}
+
+	VTAILQ_FOREACH(stv2, &stevedores, list) {
+		if (strcmp(stv2->ident, stv->ident))
+			continue;
+		ARGV_ERR("(-s%s=%s) already defined once\n",
+		    stv->ident, stv->name);
+	}
+
+	if (stv->init != NULL)
+		stv->init(stv, ac, av);
+	else if (ac != 0)
+		ARGV_ERR("(-s%s) too many arguments\n", stv->name);
+
+	if (!strcmp(stv->ident, TRANSIENT_STORAGE)) {
+		stv->transient = 1;
+		AZ(stv_transient);
+		stv_transient = stv;
+	} else {
+		VTAILQ_INSERT_TAIL(&stevedores, stv, list);
+		if (!stv_next)
+			stv_next = VTAILQ_FIRST(&stevedores);
+	}
+}
+
+/*--------------------------------------------------------------------*/
+
+void
+STV_Config_Transient(void)
+{
+
+	ASSERT_MGT();
+
+	if (stv_transient == NULL)
+		STV_Config(TRANSIENT_STORAGE "=malloc");
+}
+
+/*--------------------------------------------------------------------*/
+
+static void
+stv_cli_list(struct cli *cli, const char * const *av, void *priv)
+{
+	struct stevedore *stv;
+
+	ASSERT_MGT();
+	(void)av;
+	(void)priv;
+	VCLI_Out(cli, "Storage devices:\n");
+	stv = stv_transient;
+		VCLI_Out(cli, "\tstorage.%s = %s\n", stv->ident, stv->name);
+	VTAILQ_FOREACH(stv, &stevedores, list)
+		VCLI_Out(cli, "\tstorage.%s = %s\n", stv->ident, stv->name);
+}
+
+/*--------------------------------------------------------------------*/
+
+struct cli_proto cli_stv[] = {
+	{ "storage.list", "storage.list", "List storage devices\n",
+	    0, 0, "", stv_cli_list },
+	{ NULL}
+};
+
+/*--------------------------------------------------------------------
+ * VRT functions for stevedores
+ */
+
+static const struct stevedore *
+stv_find(const char *nm)
+{
+	const struct stevedore *stv;
+
+	VTAILQ_FOREACH(stv, &stevedores, list)
+		if (!strcmp(stv->ident, nm))
+			return (stv);
+	if (!strcmp(TRANSIENT_STORAGE, nm))
+		return (stv_transient);
+	return (NULL);
+}
+
+int
+VRT_Stv(const char *nm)
+{
+
+	if (stv_find(nm) != NULL)
+		return (1);
+	return (0);
+}
+
+#define VRTSTVVAR(nm, vtype, ctype, dval)	\
+ctype						\
+VRT_Stv_##nm(const char *nm)			\
+{						\
+	const struct stevedore *stv;		\
+						\
+	stv = stv_find(nm);			\
+	if (stv == NULL)			\
+		return (dval);			\
+	if (stv->var_##nm == NULL)		\
+		return (dval);			\
+	return (stv->var_##nm(stv));		\
+}
+
+#include "tbl/vrt_stv_var.h"
+#undef VRTSTVVAR
diff --git a/bin/varnishd/storage/stevedore_utils.c b/bin/varnishd/storage/stevedore_utils.c
new file mode 100644
index 0000000..3daebf8
--- /dev/null
+++ b/bin/varnishd/storage/stevedore_utils.c
@@ -0,0 +1,244 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2010 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Utility functions for stevedores and storage modules
+ */
+
+#include "config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_MOUNT_H
+#  include <sys/mount.h>
+#endif
+#ifdef HAVE_SYS_STATVFS_H
+#  include <sys/statvfs.h>
+#endif
+#ifdef HAVE_SYS_VFS_H
+#  include <sys/vfs.h>
+#endif
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "mgt.h"
+
+#include "storage/storage.h"
+#include "vnum.h"
+
+#ifndef O_LARGEFILE
+#define O_LARGEFILE	0
+#endif
+
+/*--------------------------------------------------------------------
+ * Get a storage file.
+ *
+ * The fn argument can be an existing file, an existing directory or
+ * a nonexistent filename in an existing directory.
+ *
+ * If a directory is specified, the file will be anonymous (unlinked)
+ *
+ * Return:
+ *	 0 if the file was preexisting.
+ *	 1 if the file was created.
+ *	 2 if the file is anonymous.
+ *
+ * Uses ARGV_ERR to exit in case of trouble.
+ */
+
+int
+STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx)
+{
+	int fd;
+	struct stat st;
+	int retval = 1;
+	char buf[FILENAME_MAX];
+
+	AN(fn);
+	AN(fnp);
+	AN(fdp);
+	*fnp = NULL;
+	*fdp = -1;
+
+	/* try to create a new file of this name */
+	fd = open(fn, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE, 0600);
+	if (fd >= 0) {
+		*fdp = fd;
+		*fnp = fn;
+		return (retval);
+	}
+
+	if (stat(fn, &st))
+		ARGV_ERR(
+		    "(%s) \"%s\" does not exist and could not be created\n",
+		    ctx, fn);
+
+	if (S_ISDIR(st.st_mode)) {
+		bprintf(buf, "%s/varnish.XXXXXX", fn);
+		fd = mkstemp(buf);
+		if (fd < 0)
+			ARGV_ERR("(%s) \"%s\" mkstemp(%s) failed (%s)\n",
+			    ctx, fn, buf, strerror(errno));
+		AZ(unlink(buf));
+		*fnp = strdup(buf);
+		AN(*fnp);
+		retval = 2;
+	} else if (S_ISREG(st.st_mode)) {
+		fd = open(fn, O_RDWR | O_LARGEFILE);
+		if (fd < 0)
+			ARGV_ERR("(%s) \"%s\" could not open (%s)\n",
+			    ctx, fn, strerror(errno));
+		*fnp = fn;
+		retval = 0;
+	} else
+		ARGV_ERR(
+		    "(%s) \"%s\" is neither file nor directory\n", ctx, fn);
+
+	AZ(fstat(fd, &st));
+	if (!S_ISREG(st.st_mode))
+		ARGV_ERR("(%s) \"%s\" was not a file after opening\n",
+		    ctx, fn);
+
+	*fdp = fd;
+	return (retval);
+}
+
+/*--------------------------------------------------------------------
+ * Figure out how much space is in a filesystem
+ */
+
+static uintmax_t
+stv_fsspace(int fd, unsigned *bs)
+{
+	uintmax_t bsize, bavail;
+#if defined(HAVE_SYS_STATVFS_H)
+	struct statvfs fsst;
+
+	AZ(fstatvfs(fd, &fsst));
+	bsize = fsst.f_frsize;
+	bavail = fsst.f_bavail;
+#elif defined(HAVE_SYS_MOUNT_H) || defined(HAVE_SYS_VFS_H)
+	struct statfs fsst;
+
+	AZ(fstatfs(fd, &fsst));
+	bsize = fsst.f_bsize;
+	bavail = fsst.f_bavail;
+#else
+#error no struct statfs / struct statvfs
+#endif
+
+	/* We use units of the larger of filesystem blocksize and pagesize */
+	if (*bs < bsize)
+		*bs = bsize;
+	xxxassert(*bs % bsize == 0);
+	return (bsize * bavail);
+}
+
+
+/*--------------------------------------------------------------------
+ * Decide file size.
+ *
+ * If the size specification is empty and the file exists with non-zero
+ * size, use that, otherwise, interpret the specification.
+ *
+ * Handle off_t sizes and pointer width limitations.
+ */
+
+uintmax_t
+STV_FileSize(int fd, const char *size, unsigned *granularity, const char *ctx)
+{
+	uintmax_t l, fssize;
+	unsigned bs;
+	const char *q;
+	int i;
+	off_t o;
+	struct stat st;
+
+	AZ(fstat(fd, &st));
+	xxxassert(S_ISREG(st.st_mode));
+
+	bs = *granularity;
+	fssize = stv_fsspace(fd, &bs);
+	xxxassert(bs % *granularity == 0);
+
+	if ((size == NULL || *size == '\0') && st.st_size != 0) {
+		/*
+		 * We have no size specification, but an existing file,
+		 * use its existing size.
+		 */
+		l = st.st_size;
+	} else {
+		AN(size);
+		q = VNUM_2bytes(size, &l, fssize);
+
+		if (q != NULL)
+			ARGV_ERR("(%s) size \"%s\": %s\n", ctx, size, q);
+
+		if (l < 1024*1024)
+			ARGV_ERR("(-spersistent) size \"%s\": too small, "
+				 "did you forget to specify M or G?\n", size);
+	}
+
+	/*
+	 * This trickery wouldn't be necessary if X/Open would
+	 * just add OFF_MAX to <limits.h>...
+	 */
+	i = 0;
+	while(1) {
+		o = l;
+		if (o == l && o > 0)
+			break;
+		l >>= 1;
+		i++;
+	}
+	if (i)
+		fprintf(stderr, "WARNING: (%s) file size reduced"
+		    " to %ju due to system \"off_t\" limitations\n", ctx, l);
+	else if (l - st.st_size > fssize) {
+		l = fssize * 80 / 100;
+		fprintf(stderr, "WARNING: (%s) file size reduced"
+		    " to %ju (80%% of available disk space)\n", ctx, l);
+	}
+
+	if (sizeof(void *) == 4 && l > INT32_MAX) { /*lint !e506 !e774 !e845 */
+		fprintf(stderr,
+		    "NB: Storage size limited to 2GB on 32 bit architecture,\n"
+		    "NB: otherwise we could run out of address space.\n"
+		);
+		l = INT32_MAX;
+	}
+
+	/* round down to multiple of filesystem blocksize or pagesize */
+	l -= (l % bs);
+
+	*granularity = bs;
+	return(l);
+}
diff --git a/bin/varnishd/storage/storage.h b/bin/varnishd/storage/storage.h
new file mode 100644
index 0000000..80cad13
--- /dev/null
+++ b/bin/varnishd/storage/storage.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This defines the backend interface between the stevedore and the
+ * pluggable storage implementations.
+ *
+ */
+
+struct stv_objsecrets;
+struct stevedore;
+struct sess;
+struct lru;
+
+typedef void storage_init_f(struct stevedore *, int ac, char * const *av);
+typedef void storage_open_f(const struct stevedore *);
+typedef struct storage *storage_alloc_f(struct stevedore *, size_t size);
+typedef void storage_trim_f(struct storage *, size_t size);
+typedef void storage_free_f(struct storage *);
+typedef struct object *storage_allocobj_f(struct stevedore *, struct sess *sp,
+    unsigned ltot, const struct stv_objsecrets *);
+typedef void storage_close_f(const struct stevedore *);
+
+/* Prototypes for VCL variable responders */
+#define VRTSTVTYPE(ct) typedef ct storage_var_##ct(const struct stevedore *);
+#include "tbl/vrt_stv_var.h"
+#undef VRTSTVTYPE
+
+/*--------------------------------------------------------------------*/
+
+struct stevedore {
+	unsigned		magic;
+#define STEVEDORE_MAGIC		0x4baf43db
+	const char		*name;
+	unsigned		transient;
+	storage_init_f		*init;		/* called by mgt process */
+	storage_open_f		*open;		/* called by cache process */
+	storage_alloc_f		*alloc;		/* --//-- */
+	storage_trim_f		*trim;		/* --//-- */
+	storage_free_f		*free;		/* --//-- */
+	storage_close_f		*close;		/* --//-- */
+	storage_allocobj_f	*allocobj;	/* --//-- */
+
+	struct lru		*lru;
+
+#define VRTSTVVAR(nm, vtype, ctype, dval) storage_var_##ctype *var_##nm;
+#include "tbl/vrt_stv_var.h"
+#undef VRTSTVVAR
+
+	/* private fields */
+	void			*priv;
+
+	VTAILQ_ENTRY(stevedore)	list;
+	char			ident[16];	/* XXX: match VSM_chunk.ident */
+};
+
+/*--------------------------------------------------------------------*/
+int STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx);
+uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity,
+    const char *ctx);
+struct object *STV_MkObject(struct sess *sp, void *ptr, unsigned ltot,
+    const struct stv_objsecrets *soc);
+
+struct lru *LRU_Alloc(void);
+void LRU_Free(struct lru *lru);
+
+/*--------------------------------------------------------------------*/
+extern const struct stevedore sma_stevedore;
+extern const struct stevedore smf_stevedore;
+extern const struct stevedore smp_stevedore;
+#ifdef HAVE_LIBUMEM
+extern const struct stevedore smu_stevedore;
+#endif
diff --git a/bin/varnishd/storage/storage_file.c b/bin/varnishd/storage/storage_file.c
new file mode 100644
index 0000000..58e1d3d
--- /dev/null
+++ b/bin/varnishd/storage/storage_file.c
@@ -0,0 +1,615 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2010 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Storage method based on mmap'ed file
+ */
+
+#include "config.h"
+
+#include <sys/mman.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "vnum.h"
+
+#ifndef MAP_NOCORE
+#define MAP_NOCORE 0 /* XXX Linux */
+#endif
+
+#ifndef MAP_NOSYNC
+#define MAP_NOSYNC 0 /* XXX Linux */
+#endif
+
+#define MINPAGES		128
+
+/*
+ * Number of buckets on free-list.
+ *
+ * Last bucket is "larger than" so choose number so that the second
+ * to last bucket matches the 128k CHUNKSIZE in cache_fetch.c when
+ * using the a 4K minimal page size
+ */
+#define NBUCKET			(128 / 4 + 1)
+
+/*--------------------------------------------------------------------*/
+
+VTAILQ_HEAD(smfhead, smf);
+
+struct smf {
+	unsigned		magic;
+#define SMF_MAGIC		0x0927a8a0
+	struct storage		s;
+	struct smf_sc		*sc;
+
+	int			alloc;
+
+	off_t			size;
+	off_t			offset;
+	unsigned char		*ptr;
+
+	VTAILQ_ENTRY(smf)	order;
+	VTAILQ_ENTRY(smf)	status;
+	struct smfhead		*flist;
+};
+
+struct smf_sc {
+	unsigned		magic;
+#define SMF_SC_MAGIC		0x52962ee7
+	struct lock		mtx;
+	struct VSC_C_smf	*stats;
+
+	const char		*filename;
+	int			fd;
+	unsigned		pagesize;
+	uintmax_t		filesize;
+	struct smfhead		order;
+	struct smfhead		free[NBUCKET];
+	struct smfhead		used;
+};
+
+/*--------------------------------------------------------------------*/
+
+static void
+smf_initfile(struct smf_sc *sc, const char *size)
+{
+	sc->filesize = STV_FileSize(sc->fd, size, &sc->pagesize, "-sfile");
+
+	AZ(ftruncate(sc->fd, (off_t)sc->filesize));
+
+	/* XXX: force block allocation here or in open ? */
+}
+
+static const char default_size[] = "100M";
+static const char default_filename[] = ".";
+
+static void
+smf_init(struct stevedore *parent, int ac, char * const *av)
+{
+	const char *size, *fn, *r;
+	struct smf_sc *sc;
+	unsigned u;
+	uintmax_t page_size;
+
+	AZ(av[ac]);
+
+	fn = default_filename;
+	size = default_size;
+	page_size = getpagesize();
+
+	if (ac > 3)
+		ARGV_ERR("(-sfile) too many arguments\n");
+	if (ac > 0 && *av[0] != '\0')
+		fn = av[0];
+	if (ac > 1 && *av[1] != '\0')
+		size = av[1];
+	if (ac > 2 && *av[2] != '\0') {
+
+		r = VNUM_2bytes(av[2], &page_size, 0);
+		if (r != NULL)
+			ARGV_ERR("(-sfile) granularity \"%s\": %s\n", av[2], r);
+	}
+
+	AN(fn);
+	AN(size);
+
+	ALLOC_OBJ(sc, SMF_SC_MAGIC);
+	XXXAN(sc);
+	VTAILQ_INIT(&sc->order);
+	for (u = 0; u < NBUCKET; u++)
+		VTAILQ_INIT(&sc->free[u]);
+	VTAILQ_INIT(&sc->used);
+	sc->pagesize = page_size;
+
+	parent->priv = sc;
+
+	(void)STV_GetFile(fn, &sc->fd, &sc->filename, "-sfile");
+
+	mgt_child_inherit(sc->fd, "storage_file");
+	smf_initfile(sc, size);
+}
+
+/*--------------------------------------------------------------------
+ * Insert/Remove from correct freelist
+ */
+
+static void
+insfree(struct smf_sc *sc, struct smf *sp)
+{
+	size_t b;
+	struct smf *sp2;
+	size_t ns;
+
+	assert(sp->alloc == 0);
+	assert(sp->flist == NULL);
+	Lck_AssertHeld(&sc->mtx);
+	b = sp->size / sc->pagesize;
+	if (b >= NBUCKET) {
+		b = NBUCKET - 1;
+		sc->stats->g_smf_large++;
+	} else {
+		sc->stats->g_smf_frag++;
+	}
+	sp->flist = &sc->free[b];
+	ns = b * sc->pagesize;
+	VTAILQ_FOREACH(sp2, sp->flist, status) {
+		assert(sp2->size >= ns);
+		assert(sp2->alloc == 0);
+		assert(sp2->flist == sp->flist);
+		if (sp->offset < sp2->offset)
+			break;
+	}
+	if (sp2 == NULL)
+		VTAILQ_INSERT_TAIL(sp->flist, sp, status);
+	else
+		VTAILQ_INSERT_BEFORE(sp2, sp, status);
+}
+
+static void
+remfree(const struct smf_sc *sc, struct smf *sp)
+{
+	size_t b;
+
+	assert(sp->alloc == 0);
+	assert(sp->flist != NULL);
+	Lck_AssertHeld(&sc->mtx);
+	b = sp->size / sc->pagesize;
+	if (b >= NBUCKET) {
+		b = NBUCKET - 1;
+		sc->stats->g_smf_large--;
+	} else {
+		sc->stats->g_smf_frag--;
+	}
+	assert(sp->flist == &sc->free[b]);
+	VTAILQ_REMOVE(sp->flist, sp, status);
+	sp->flist = NULL;
+}
+
+/*--------------------------------------------------------------------
+ * Allocate a range from the first free range that is large enough.
+ */
+
+static struct smf *
+alloc_smf(struct smf_sc *sc, size_t bytes)
+{
+	struct smf *sp, *sp2;
+	size_t b;
+
+	assert(!(bytes % sc->pagesize));
+	b = bytes / sc->pagesize;
+	if (b >= NBUCKET)
+		b = NBUCKET - 1;
+	for (sp = NULL; b < NBUCKET - 1; b++) {
+		sp = VTAILQ_FIRST(&sc->free[b]);
+		if (sp != NULL)
+			break;
+	}
+	if (sp == NULL) {
+		VTAILQ_FOREACH(sp, &sc->free[NBUCKET -1], status)
+			if (sp->size >= bytes)
+				break;
+	}
+	if (sp == NULL)
+		return (sp);
+
+	assert(sp->size >= bytes);
+	remfree(sc, sp);
+
+	if (sp->size == bytes) {
+		sp->alloc = 1;
+		VTAILQ_INSERT_TAIL(&sc->used, sp, status);
+		return (sp);
+	}
+
+	/* Split from front */
+	sp2 = malloc(sizeof *sp2);
+	XXXAN(sp2);
+	sc->stats->g_smf++;
+	*sp2 = *sp;
+
+	sp->offset += bytes;
+	sp->ptr += bytes;
+	sp->size -= bytes;
+
+	sp2->size = bytes;
+	sp2->alloc = 1;
+	VTAILQ_INSERT_BEFORE(sp, sp2, order);
+	VTAILQ_INSERT_TAIL(&sc->used, sp2, status);
+	insfree(sc, sp);
+	return (sp2);
+}
+
+/*--------------------------------------------------------------------
+ * Free a range.  Attempt merge forward and backward, then sort into
+ * free list according to age.
+ */
+
+static void
+free_smf(struct smf *sp)
+{
+	struct smf *sp2;
+	struct smf_sc *sc = sp->sc;
+
+	CHECK_OBJ_NOTNULL(sp, SMF_MAGIC);
+	assert(sp->alloc != 0);
+	assert(sp->size > 0);
+	assert(!(sp->size % sc->pagesize));
+	VTAILQ_REMOVE(&sc->used, sp, status);
+	sp->alloc = 0;
+
+	sp2 = VTAILQ_NEXT(sp, order);
+	if (sp2 != NULL &&
+	    sp2->alloc == 0 &&
+	    (sp2->ptr == sp->ptr + sp->size) &&
+	    (sp2->offset == sp->offset + sp->size)) {
+		sp->size += sp2->size;
+		VTAILQ_REMOVE(&sc->order, sp2, order);
+		remfree(sc, sp2);
+		free(sp2);
+		sc->stats->g_smf--;
+	}
+
+	sp2 = VTAILQ_PREV(sp, smfhead, order);
+	if (sp2 != NULL &&
+	    sp2->alloc == 0 &&
+	    (sp->ptr == sp2->ptr + sp2->size) &&
+	    (sp->offset == sp2->offset + sp2->size)) {
+		remfree(sc, sp2);
+		sp2->size += sp->size;
+		VTAILQ_REMOVE(&sc->order, sp, order);
+		free(sp);
+		sc->stats->g_smf--;
+		sp = sp2;
+	}
+
+	insfree(sc, sp);
+}
+
+/*--------------------------------------------------------------------
+ * Trim the tail of a range.
+ */
+
+static void
+trim_smf(struct smf *sp, size_t bytes)
+{
+	struct smf *sp2;
+	struct smf_sc *sc = sp->sc;
+
+	assert(sp->alloc != 0);
+	assert(bytes > 0);
+	assert(bytes < sp->size);
+	assert(!(bytes % sc->pagesize));
+	assert(!(sp->size % sc->pagesize));
+	CHECK_OBJ_NOTNULL(sp, SMF_MAGIC);
+	sp2 = malloc(sizeof *sp2);
+	XXXAN(sp2);
+	sc->stats->g_smf++;
+	*sp2 = *sp;
+
+	sp2->size -= bytes;
+	sp->size = bytes;
+	sp2->ptr += bytes;
+	sp2->offset += bytes;
+	VTAILQ_INSERT_AFTER(&sc->order, sp, sp2, order);
+	VTAILQ_INSERT_TAIL(&sc->used, sp2, status);
+	free_smf(sp2);
+}
+
+/*--------------------------------------------------------------------
+ * Insert a newly created range as busy, then free it to do any collapses
+ */
+
+static void
+new_smf(struct smf_sc *sc, unsigned char *ptr, off_t off, size_t len)
+{
+	struct smf *sp, *sp2;
+
+	assert(!(len % sc->pagesize));
+	sp = calloc(sizeof *sp, 1);
+	XXXAN(sp);
+	sp->magic = SMF_MAGIC;
+	sp->s.magic = STORAGE_MAGIC;
+	sc->stats->g_smf++;
+
+	sp->sc = sc;
+	sp->size = len;
+	sp->ptr = ptr;
+	sp->offset = off;
+	sp->alloc = 1;
+
+	VTAILQ_FOREACH(sp2, &sc->order, order) {
+		if (sp->ptr < sp2->ptr) {
+			VTAILQ_INSERT_BEFORE(sp2, sp, order);
+			break;
+		}
+	}
+	if (sp2 == NULL)
+		VTAILQ_INSERT_TAIL(&sc->order, sp, order);
+
+	VTAILQ_INSERT_HEAD(&sc->used, sp, status);
+
+	free_smf(sp);
+}
+
+/*--------------------------------------------------------------------*/
+
+/*
+ * XXX: This may be too aggressive and soak up too much address room.
+ * XXX: On the other hand, the user, directly or implicitly asked us to
+ * XXX: use this much storage, so we should make a decent effort.
+ * XXX: worst case (I think), malloc will fail.
+ */
+
+static void
+smf_open_chunk(struct smf_sc *sc, off_t sz, off_t off, off_t *fail, off_t *sum)
+{
+	void *p;
+	off_t h;
+
+	assert(sz != 0);
+	assert(!(sz % sc->pagesize));
+
+	if (*fail < (uintmax_t)sc->pagesize * MINPAGES)
+		return;
+
+	if (sz > 0 && sz < *fail && sz < SSIZE_MAX) {
+		p = mmap(NULL, sz, PROT_READ|PROT_WRITE,
+		    MAP_NOCORE | MAP_NOSYNC | MAP_SHARED, sc->fd, off);
+		if (p != MAP_FAILED) {
+			(void) madvise(p, sz, MADV_RANDOM);
+			(*sum) += sz;
+			new_smf(sc, p, off, sz);
+			return;
+		}
+	}
+
+	if (sz < *fail)
+		*fail = sz;
+
+	h = sz / 2;
+	if (h > SSIZE_MAX)
+		h = SSIZE_MAX;
+	h -= (h % sc->pagesize);
+
+	smf_open_chunk(sc, h, off, fail, sum);
+	smf_open_chunk(sc, sz - h, off + h, fail, sum);
+}
+
+static void
+smf_open(const struct stevedore *st)
+{
+	struct smf_sc *sc;
+	off_t fail = 1 << 30;	/* XXX: where is OFF_T_MAX ? */
+	off_t sum = 0;
+
+	CAST_OBJ_NOTNULL(sc, st->priv, SMF_SC_MAGIC);
+	sc->stats = VSM_Alloc(sizeof *sc->stats,
+	    VSC_CLASS, VSC_TYPE_SMF, st->ident);
+	Lck_New(&sc->mtx, lck_smf);
+	Lck_Lock(&sc->mtx);
+	smf_open_chunk(sc, sc->filesize, 0, &fail, &sum);
+	Lck_Unlock(&sc->mtx);
+	printf("SMF.%s mmap'ed %ju bytes of %ju\n",
+	    st->ident, (uintmax_t)sum, sc->filesize);
+
+	/* XXX */
+	if (sum < MINPAGES * (off_t)getpagesize())
+		exit (2);
+
+	sc->stats->g_space += sc->filesize;
+}
+
+/*--------------------------------------------------------------------*/
+
+static struct storage *
+smf_alloc(struct stevedore *st, size_t size)
+{
+	struct smf *smf;
+	struct smf_sc *sc;
+
+	CAST_OBJ_NOTNULL(sc, st->priv, SMF_SC_MAGIC);
+	assert(size > 0);
+	size += (sc->pagesize - 1);
+	size &= ~(sc->pagesize - 1);
+	Lck_Lock(&sc->mtx);
+	sc->stats->c_req++;
+	smf = alloc_smf(sc, size);
+	if (smf == NULL) {
+		sc->stats->c_fail++;
+		Lck_Unlock(&sc->mtx);
+		return (NULL);
+	}
+	CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
+	sc->stats->g_alloc++;
+	sc->stats->c_bytes += smf->size;
+	sc->stats->g_bytes += smf->size;
+	sc->stats->g_space -= smf->size;
+	Lck_Unlock(&sc->mtx);
+	CHECK_OBJ_NOTNULL(&smf->s, STORAGE_MAGIC);	/*lint !e774 */
+	XXXAN(smf);
+	assert(smf->size == size);
+	smf->s.space = size;
+	smf->s.priv = smf;
+	smf->s.ptr = smf->ptr;
+	smf->s.len = 0;
+	smf->s.stevedore = st;
+#ifdef SENDFILE_WORKS
+	smf->s.fd = smf->sc->fd;
+	smf->s.where = smf->offset;
+#endif
+	return (&smf->s);
+}
+
+/*--------------------------------------------------------------------*/
+
+static void
+smf_trim(struct storage *s, size_t size)
+{
+	struct smf *smf;
+	struct smf_sc *sc;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	assert(size > 0);
+	assert(size <= s->space);
+	xxxassert(size > 0);	/* XXX: seen */
+	CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
+	assert(size <= smf->size);
+	sc = smf->sc;
+	size += (sc->pagesize - 1);
+	size &= ~(sc->pagesize - 1);
+	if (smf->size > size) {
+		Lck_Lock(&sc->mtx);
+		sc->stats->c_freed += (smf->size - size);
+		sc->stats->g_bytes -= (smf->size - size);
+		sc->stats->g_space += (smf->size - size);
+		trim_smf(smf, size);
+		assert(smf->size == size);
+		Lck_Unlock(&sc->mtx);
+		s->space = size;
+	}
+}
+
+/*--------------------------------------------------------------------*/
+
+static void __match_proto__(storage_free_f)
+smf_free(struct storage *s)
+{
+	struct smf *smf;
+	struct smf_sc *sc;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
+	sc = smf->sc;
+	Lck_Lock(&sc->mtx);
+	sc->stats->g_alloc--;
+	sc->stats->c_freed += smf->size;
+	sc->stats->g_bytes -= smf->size;
+	sc->stats->g_space += smf->size;
+	free_smf(smf);
+	Lck_Unlock(&sc->mtx);
+}
+
+/*--------------------------------------------------------------------*/
+
+const struct stevedore smf_stevedore = {
+	.magic	=	STEVEDORE_MAGIC,
+	.name	=	"file",
+	.init	=	smf_init,
+	.open	=	smf_open,
+	.alloc	=	smf_alloc,
+	.trim	=	smf_trim,
+	.free	=	smf_free,
+};
+
+#ifdef INCLUDE_TEST_DRIVER
+
+void vca_flush(struct sess *sp) {}
+
+#define N	100
+#define M	(128*1024)
+
+struct storage *s[N];
+
+static void
+dumpit(void)
+{
+	struct smf_sc *sc = smf_stevedore.priv;
+	struct smf *s;
+
+	return (0);
+	printf("----------------\n");
+	printf("Order:\n");
+	VTAILQ_FOREACH(s, &sc->order, order) {
+		printf("%10p %12ju %12ju %12ju\n",
+		    s, s->offset, s->size, s->offset + s->size);
+	}
+	printf("Used:\n");
+	VTAILQ_FOREACH(s, &sc->used, status) {
+		printf("%10p %12ju %12ju %12ju\n",
+		    s, s->offset, s->size, s->offset + s->size);
+	}
+	printf("Free:\n");
+	VTAILQ_FOREACH(s, &sc->free, status) {
+		printf("%10p %12ju %12ju %12ju\n",
+		    s, s->offset, s->size, s->offset + s->size);
+	}
+	printf("================\n");
+}
+
+int
+main(int argc, char **argv)
+{
+	int i, j;
+
+	setbuf(stdout, NULL);
+	smf_init(&smf_stevedore, "");
+	smf_open(&smf_stevedore);
+	while (1) {
+		dumpit();
+		i = random() % N;
+		do
+			j = random() % M;
+		while (j == 0);
+		if (s[i] == NULL) {
+			s[i] = smf_alloc(&smf_stevedore, j);
+			printf("A %10p %12d\n", s[i], j);
+		} else if (j < s[i]->space) {
+			smf_trim(s[i], j);
+			printf("T %10p %12d\n", s[i], j);
+		} else {
+			smf_free(s[i]);
+			printf("D %10p\n", s[i]);
+			s[i] = NULL;
+		}
+	}
+}
+
+#endif /* INCLUDE_TEST_DRIVER */
diff --git a/bin/varnishd/storage/storage_malloc.c b/bin/varnishd/storage/storage_malloc.c
new file mode 100644
index 0000000..03d6a55
--- /dev/null
+++ b/bin/varnishd/storage/storage_malloc.c
@@ -0,0 +1,256 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Storage method based on malloc(3)
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "vnum.h"
+
+struct sma_sc {
+	unsigned		magic;
+#define SMA_SC_MAGIC		0x1ac8a345
+	struct lock		sma_mtx;
+	size_t			sma_max;
+	size_t			sma_alloc;
+	struct VSC_C_sma	*stats;
+};
+
+struct sma {
+	unsigned		magic;
+#define SMA_MAGIC		0x69ae9bb9
+	struct storage		s;
+	size_t			sz;
+	struct sma_sc		*sc;
+};
+
+static struct storage *
+sma_alloc(struct stevedore *st, size_t size)
+{
+	struct sma_sc *sma_sc;
+	struct sma *sma = NULL;
+	void *p;
+
+	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
+	Lck_Lock(&sma_sc->sma_mtx);
+	sma_sc->stats->c_req++;
+	if (sma_sc->sma_alloc + size > sma_sc->sma_max) {
+		sma_sc->stats->c_fail += size;
+		size = 0;
+	} else {
+		sma_sc->sma_alloc += size;
+		sma_sc->stats->c_bytes += size;
+		sma_sc->stats->g_alloc++;
+		sma_sc->stats->g_bytes += size;
+		if (sma_sc->sma_max != SIZE_MAX)
+			sma_sc->stats->g_space -= size;
+	}
+	Lck_Unlock(&sma_sc->sma_mtx);
+
+	if (size == 0)
+		return (NULL);
+
+	/*
+	 * Do not collapse the sma allocation with sma->s.ptr: it is not
+	 * a good idea.  Not only would it make ->trim impossible,
+	 * performance-wise it would be a catastrophe with chunksized
+	 * allocations growing another full page, just to accommodate the sma.
+	 */
+
+	p = malloc(size);
+	if (p != NULL) {
+		ALLOC_OBJ(sma, SMA_MAGIC);
+		if (sma != NULL)
+			sma->s.ptr = p;
+		else
+			free(p);
+	}
+	if (sma == NULL) {
+		Lck_Lock(&sma_sc->sma_mtx);
+		/*
+		 * XXX: Not nice to have counters go backwards, but we do
+		 * XXX: not want to pick up the lock twice just for stats.
+		 */
+		sma_sc->stats->c_fail++;
+		sma_sc->stats->c_bytes -= size;
+		sma_sc->stats->g_alloc--;
+		sma_sc->stats->g_bytes -= size;
+		if (sma_sc->sma_max != SIZE_MAX)
+			sma_sc->stats->g_space += size;
+		Lck_Unlock(&sma_sc->sma_mtx);
+		return (NULL);
+	}
+	sma->sc = sma_sc;
+	sma->sz = size;
+	sma->s.priv = sma;
+	sma->s.len = 0;
+	sma->s.space = size;
+#ifdef SENDFILE_WORKS
+	sma->s.fd = -1;
+#endif
+	sma->s.stevedore = st;
+	sma->s.magic = STORAGE_MAGIC;
+	return (&sma->s);
+}
+
+static void __match_proto__(storage_free_f)
+sma_free(struct storage *s)
+{
+	struct sma_sc *sma_sc;
+	struct sma *sma;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	CAST_OBJ_NOTNULL(sma, s->priv, SMA_MAGIC);
+	sma_sc = sma->sc;
+	assert(sma->sz == sma->s.space);
+	Lck_Lock(&sma_sc->sma_mtx);
+	sma_sc->sma_alloc -= sma->sz;
+	sma_sc->stats->g_alloc--;
+	sma_sc->stats->g_bytes -= sma->sz;
+	sma_sc->stats->c_freed += sma->sz;
+	if (sma_sc->sma_max != SIZE_MAX)
+		sma_sc->stats->g_space += sma->sz;
+	Lck_Unlock(&sma_sc->sma_mtx);
+	free(sma->s.ptr);
+	free(sma);
+}
+
+static void
+sma_trim(struct storage *s, size_t size)
+{
+	struct sma_sc *sma_sc;
+	struct sma *sma;
+	void *p;
+	size_t delta;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	CAST_OBJ_NOTNULL(sma, s->priv, SMA_MAGIC);
+	sma_sc = sma->sc;
+
+	assert(sma->sz == sma->s.space);
+	assert(size < sma->sz);
+	delta = sma->sz - size;
+	if (delta < 256)
+		return;
+	if ((p = realloc(sma->s.ptr, size)) != NULL) {
+		Lck_Lock(&sma_sc->sma_mtx);
+		sma_sc->sma_alloc -= delta;
+		sma_sc->stats->g_bytes -= delta;
+		sma_sc->stats->c_freed += delta;
+		if (sma_sc->sma_max != SIZE_MAX)
+			sma_sc->stats->g_space += delta;
+		sma->sz = size;
+		Lck_Unlock(&sma_sc->sma_mtx);
+		sma->s.ptr = p;
+		s->space = size;
+	}
+}
+
+static double
+sma_used_space(const struct stevedore *st)
+{
+	struct sma_sc *sma_sc;
+
+	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
+	return (sma_sc->sma_alloc);
+}
+
+static double
+sma_free_space(const struct stevedore *st)
+{
+	struct sma_sc *sma_sc;
+
+	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
+	return (sma_sc->sma_max - sma_sc->sma_alloc);
+}
+
+static void
+sma_init(struct stevedore *parent, int ac, char * const *av)
+{
+	const char *e;
+	uintmax_t u;
+	struct sma_sc *sc;
+
+	ASSERT_MGT();
+	ALLOC_OBJ(sc, SMA_SC_MAGIC);
+	AN(sc);
+	sc->sma_max = SIZE_MAX;
+	assert(sc->sma_max == SIZE_MAX);
+	parent->priv = sc;
+
+	AZ(av[ac]);
+	if (ac > 1)
+		ARGV_ERR("(-smalloc) too many arguments\n");
+
+	if (ac == 0 || *av[0] == '\0')
+		 return;
+
+	e = VNUM_2bytes(av[0], &u, 0);
+	if (e != NULL)
+		ARGV_ERR("(-smalloc) size \"%s\": %s\n", av[0], e);
+	if ((u != (uintmax_t)(size_t)u))
+		ARGV_ERR("(-smalloc) size \"%s\": too big\n", av[0]);
+	if (u < 1024*1024)
+		ARGV_ERR("(-smalloc) size \"%s\": too small, "
+			 "did you forget to specify M or G?\n", av[0]);
+
+	sc->sma_max = u;
+}
+
+static void
+sma_open(const struct stevedore *st)
+{
+	struct sma_sc *sma_sc;
+
+	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
+	Lck_New(&sma_sc->sma_mtx, lck_sma);
+	sma_sc->stats = VSM_Alloc(sizeof *sma_sc->stats,
+	    VSC_CLASS, VSC_TYPE_SMA, st->ident);
+	memset(sma_sc->stats, 0, sizeof *sma_sc->stats);
+	if (sma_sc->sma_max != SIZE_MAX)
+		sma_sc->stats->g_space = sma_sc->sma_max;
+}
+
+const struct stevedore sma_stevedore = {
+	.magic	=	STEVEDORE_MAGIC,
+	.name	=	"malloc",
+	.init	=	sma_init,
+	.open	=	sma_open,
+	.alloc	=	sma_alloc,
+	.free	=	sma_free,
+	.trim	=	sma_trim,
+	.var_free_space =	sma_free_space,
+	.var_used_space =	sma_used_space,
+};
diff --git a/bin/varnishd/storage/storage_persistent.c b/bin/varnishd/storage/storage_persistent.c
new file mode 100644
index 0000000..84aef5c
--- /dev/null
+++ b/bin/varnishd/storage/storage_persistent.c
@@ -0,0 +1,678 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Persistent storage method
+ *
+ * XXX: Before we start the client or maybe after it stops, we should give the
+ * XXX: stevedores a chance to examine their storage for consistency.
+ *
+ * XXX: Do we ever free the LRU-lists ?
+ */
+
+#include "config.h"
+
+#include <sys/param.h>
+#include <sys/mman.h>
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "hash_slinger.h"
+#include "vcli.h"
+#include "vcli_priv.h"
+#include "vend.h"
+#include "vsha256.h"
+
+#include "persistent.h"
+#include "storage/storage_persistent.h"
+
+/*--------------------------------------------------------------------*/
+
+/*
+ * silos is unlocked, it only changes during startup when we are
+ * single-threaded
+ */
+static VTAILQ_HEAD(,smp_sc)	silos = VTAILQ_HEAD_INITIALIZER(silos);
+
+/*--------------------------------------------------------------------
+ * Add bans to silos
+ */
+
+static void
+smp_appendban(struct smp_sc *sc, struct smp_signctx *ctx,
+    uint32_t len, const uint8_t *ban)
+{
+	uint8_t *ptr, *ptr2;
+
+	(void)sc;
+	ptr = ptr2 = SIGN_END(ctx);
+
+	memcpy(ptr, "BAN", 4);
+	ptr += 4;
+
+	vbe32enc(ptr, len);
+	ptr += 4;
+
+	memcpy(ptr, ban, len);
+	ptr += len;
+
+	smp_append_sign(ctx, ptr2, ptr - ptr2);
+}
+
+/* Trust that cache_ban.c takes care of locking */
+
+void
+SMP_NewBan(const uint8_t *ban, unsigned ln)
+{
+	struct smp_sc *sc;
+
+	VTAILQ_FOREACH(sc, &silos, list) {
+		smp_appendban(sc, &sc->ban1, ln, ban);
+		smp_appendban(sc, &sc->ban2, ln, ban);
+	}
+}
+
+/*--------------------------------------------------------------------
+ * Attempt to open and read in a ban list
+ */
+
+static int
+smp_open_bans(struct smp_sc *sc, struct smp_signctx *ctx)
+{
+	uint8_t *ptr, *pe;
+	uint32_t length;
+	int i, retval = 0;
+
+	ASSERT_CLI();
+	(void)sc;
+	i = smp_chk_sign(ctx);
+	if (i)
+		return (i);
+	ptr = SIGN_DATA(ctx);
+	pe = ptr + ctx->ss->length;
+
+	while (ptr < pe) {
+		if (memcmp(ptr, "BAN", 4)) {
+			retval = 1001;
+			break;
+		}
+		ptr += 4;
+
+		length = vbe32dec(ptr);
+		ptr += 4;
+
+		if (ptr + length > pe) {
+			retval = 1003;
+			break;
+		}
+
+		BAN_Reload(ptr, length);
+
+		ptr += length;
+	}
+	assert(ptr <= pe);
+	return (retval);
+}
+
+/*--------------------------------------------------------------------
+ * Attempt to open and read in a segment list
+ */
+
+static int
+smp_open_segs(struct smp_sc *sc, struct smp_signctx *ctx)
+{
+	uint64_t length, l;
+	struct smp_segptr *ss, *se;
+	struct smp_seg *sg, *sg1, *sg2;
+	int i, n = 0;
+
+	ASSERT_CLI();
+	i = smp_chk_sign(ctx);
+	if (i)
+		return (i);
+
+	ss = SIGN_DATA(ctx);
+	length = ctx->ss->length;
+
+	if (length == 0) {
+		/* No segments */
+		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
+		return (0);
+	}
+	se = ss + length / sizeof *ss;
+	se--;
+	assert(ss <= se);
+
+	/*
+	 * Locate the free reserve, there are only two basic cases,
+	 * but once we start dropping segments, things get more complicated.
+	 */
+
+	sc->free_offset = se->offset + se->length;
+	l = sc->mediasize - sc->free_offset;
+	if (se->offset > ss->offset && l >= sc->free_reserve) {
+		/*
+		 * [__xxxxyyyyzzzz___]
+		 * Plenty of space at tail, do nothing.
+		 */
+	} else if (ss->offset > se->offset) {
+		/*
+		 * [zzzz____xxxxyyyy_]
+		 * (make) space between ends
+		 * We might nuke the entire tail end without getting
+		 * enough space, in which case we fall through to the
+		 * last check.
+		 */
+		while (ss < se && ss->offset > se->offset) {
+			l = ss->offset - (se->offset + se->length);
+			if (l > sc->free_reserve)
+				break;
+			ss++;
+			n++;
+		}
+	}
+
+	if (l < sc->free_reserve) {
+		/*
+		 * [__xxxxyyyyzzzz___]
+		 * (make) space at front
+		 */
+		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
+		while (ss < se) {
+			l = ss->offset - sc->free_offset;
+			if (l > sc->free_reserve)
+				break;
+			ss++;
+			n++;
+		}
+	}
+
+	assert (l >= sc->free_reserve);
+
+
+	sg1 = NULL;
+	sg2 = NULL;
+	for(; ss <= se; ss++) {
+		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
+		AN(sg);
+		sg->lru = LRU_Alloc();
+		CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
+		sg->p = *ss;
+
+		sg->flags |= SMP_SEG_MUSTLOAD;
+
+		/*
+		 * HACK: prevent save_segs from nuking segment until we have
+		 * HACK: loaded it.
+		 */
+		sg->nobj = 1;
+		if (sg1 != NULL) {
+			assert(sg1->p.offset != sg->p.offset);
+			if (sg1->p.offset < sg->p.offset)
+				assert(smp_segend(sg1) <= sg->p.offset);
+			else
+				assert(smp_segend(sg) <= sg1->p.offset);
+		}
+		if (sg2 != NULL) {
+			assert(sg2->p.offset != sg->p.offset);
+			if (sg2->p.offset < sg->p.offset)
+				assert(smp_segend(sg2) <= sg->p.offset);
+			else
+				assert(smp_segend(sg) <= sg2->p.offset);
+		}
+
+		/* XXX: check that they are inside silo */
+		/* XXX: check that they don't overlap */
+		/* XXX: check that they are serial */
+		sg->sc = sc;
+		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
+		sg2 = sg;
+		if (sg1 == NULL)
+			sg1 = sg;
+	}
+	printf("Dropped %d segments to make free_reserve\n", n);
+	return (0);
+}
+
+/*--------------------------------------------------------------------
+ * Silo worker thread
+ */
+
+static void *
+smp_thread(struct sess *sp, void *priv)
+{
+	struct smp_sc	*sc;
+	struct smp_seg *sg;
+
+	(void)sp;
+	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
+
+	/* First, load all the objects from all segments */
+	VTAILQ_FOREACH(sg, &sc->segments, list)
+		if (sg->flags & SMP_SEG_MUSTLOAD)
+			smp_load_seg(sp, sc, sg);
+
+	sc->flags |= SMP_SC_LOADED;
+	BAN_TailDeref(&sc->tailban);
+	AZ(sc->tailban);
+	printf("Silo completely loaded\n");
+	while (1) {
+		(void)sleep (1);
+		sg = VTAILQ_FIRST(&sc->segments);
+		if (sg != NULL && sg -> sc->cur_seg &&
+		    sg->nobj == 0) {
+			Lck_Lock(&sc->mtx);
+			smp_save_segs(sc);
+			Lck_Unlock(&sc->mtx);
+		}
+	}
+	NEEDLESS_RETURN(NULL);
+}
+
+/*--------------------------------------------------------------------
+ * Open a silo in the worker process
+ */
+
+static void
+smp_open(const struct stevedore *st)
+{
+	struct smp_sc	*sc;
+
+	ASSERT_CLI();
+
+	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
+
+	Lck_New(&sc->mtx, lck_smp);
+	Lck_Lock(&sc->mtx);
+
+	sc->stevedore = st;
+
+	/* We trust the parent to give us a valid silo, for good measure: */
+	AZ(smp_valid_silo(sc));
+
+	AZ(mprotect(sc->base, 4096, PROT_READ));
+
+	sc->ident = SIGN_DATA(&sc->idn);
+
+	/* We attempt ban1 first, and if that fails, try ban2 */
+	if (smp_open_bans(sc, &sc->ban1))
+		AZ(smp_open_bans(sc, &sc->ban2));
+
+	/* We attempt seg1 first, and if that fails, try seg2 */
+	if (smp_open_segs(sc, &sc->seg1))
+		AZ(smp_open_segs(sc, &sc->seg2));
+
+	/*
+	 * Grab a reference to the tail of the ban list, until the thread
+	 * has loaded all objects, so we can be sure that all of our
+	 * proto-bans survive until then.
+	 */
+	sc->tailban = BAN_TailRef();
+	AN(sc->tailban);
+
+	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */
+
+	/* XXX: abandon early segments to make sure we have free space ? */
+
+	/* Open a new segment, so we are ready to write */
+	smp_new_seg(sc);
+
+	/* Start the silo worker thread, it will load the objects */
+	WRK_BgThread(&sc->thread, "persistence", smp_thread, sc);
+
+	VTAILQ_INSERT_TAIL(&silos, sc, list);
+	Lck_Unlock(&sc->mtx);
+}
+
+/*--------------------------------------------------------------------
+ * Close a silo
+ */
+
+static void
+smp_close(const struct stevedore *st)
+{
+	struct smp_sc	*sc;
+
+	ASSERT_CLI();
+
+	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
+	Lck_Lock(&sc->mtx);
+	smp_close_seg(sc, sc->cur_seg);
+	Lck_Unlock(&sc->mtx);
+
+	/* XXX: reap thread */
+}
+
+/*--------------------------------------------------------------------
+ * Allocate a bite.
+ *
+ * Allocate [min_size...max_size] space from the bottom of the segment,
+ * as is convenient.
+ *
+ * If 'so' + 'idx' is given, also allocate a smp_object from the top
+ * of the segment.
+ *
+ * Return the segment in 'ssg' if given.
+ */
+
+static struct storage *
+smp_allocx(struct stevedore *st, size_t min_size, size_t max_size,
+    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
+{
+	struct smp_sc *sc;
+	struct storage *ss;
+	struct smp_seg *sg;
+	unsigned tries;
+	uint64_t left, extra;
+
+	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
+	assert(min_size <= max_size);
+
+	max_size = IRNUP(sc, max_size);
+	min_size = IRNUP(sc, min_size);
+
+	extra = IRNUP(sc, sizeof(*ss));
+	if (so != NULL) {
+		extra += sizeof(**so);
+		AN(idx);
+	}
+
+	Lck_Lock(&sc->mtx);
+	sg = NULL;
+	ss = NULL;
+	for (tries = 0; tries < 3; tries++) {
+		left = smp_spaceleft(sc, sc->cur_seg);
+		if (left >= extra + min_size)
+			break;
+		smp_close_seg(sc, sc->cur_seg);
+		smp_new_seg(sc);
+	}
+	if (left >= extra + min_size)  {
+		if (left < extra + max_size)
+			max_size = IRNDN(sc, left - extra);
+
+		sg = sc->cur_seg;
+		ss = (void*)(sc->base + sc->next_bot);
+		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
+		sg->nalloc++;
+		if (so != NULL) {
+			sc->next_top -= sizeof(**so);
+			*so = (void*)(sc->base + sc->next_top);
+			/* Render this smp_object mostly harmless */
+			(*so)->ttl = 0.;
+			(*so)->ban = 0.;
+			(*so)->ptr = 0;;
+			sg->objs = *so;
+			*idx = ++sg->p.lobjlist;
+		}
+		(void)smp_spaceleft(sc, sg);	/* for the assert */
+	}
+	Lck_Unlock(&sc->mtx);
+
+	if (ss == NULL)
+		return (ss);
+	AN(sg);
+	assert(max_size >= min_size);
+
+	/* Fill the storage structure */
+	memset(ss, 0, sizeof *ss);
+	ss->magic = STORAGE_MAGIC;
+	ss->ptr = PRNUP(sc, ss + 1);
+	ss->space = max_size;
+	ss->priv = sc;
+	ss->stevedore = st;
+#ifdef SENDFILE_WORKS
+	ss->fd = sc->fd;
+#endif
+	if (ssg != NULL)
+		*ssg = sg;
+	return (ss);
+}
+
+/*--------------------------------------------------------------------
+ * Allocate an object
+ */
+
+static struct object *
+smp_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
+    const struct stv_objsecrets *soc)
+{
+	struct object *o;
+	struct storage *st;
+	struct smp_sc	*sc;
+	struct smp_seg *sg;
+	struct smp_object *so;
+	struct objcore *oc;
+	unsigned objidx;
+
+	if (sp->objcore == NULL)
+		return (NULL);		/* from cnt_error */
+	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
+	AN(sp->objcore);
+	AN(sp->wrk->exp.ttl > 0.);
+
+	ltot = IRNUP(sc, ltot);
+
+	st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
+	if (st == NULL)
+		return (NULL);
+
+	assert(st->space >= ltot);
+	ltot = st->len = st->space;
+
+	o = STV_MkObject(sp, st->ptr, ltot, soc);
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	o->objstore = st;
+
+	oc = o->objcore;
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	oc->flags |= OC_F_LRUDONTMOVE;
+
+	Lck_Lock(&sc->mtx);
+	sg->nfixed++;
+	sg->nobj++;
+
+	/* We have to do this somewhere, might as well be here... */
+	assert(sizeof so->hash == DIGEST_LEN);
+	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
+	so->ttl = EXP_Grace(NULL, o);
+	so->ptr = (uint8_t*)o - sc->base;
+	so->ban = BAN_Time(oc->ban);
+
+	smp_init_oc(oc, sg, objidx);
+
+	Lck_Unlock(&sc->mtx);
+	return (o);
+}
+
+/*--------------------------------------------------------------------
+ * Allocate a bite
+ */
+
+static struct storage *
+smp_alloc(struct stevedore *st, size_t size)
+{
+
+	return (smp_allocx(st,
+	    size > 4096 ? 4096 : size, size, NULL, NULL, NULL));
+}
+
+/*--------------------------------------------------------------------
+ * Trim a bite
+ * XXX: We could trim the last allocation.
+ */
+
+static void
+smp_trim(struct storage *ss, size_t size)
+{
+
+	(void)ss;
+	(void)size;
+}
+
+/*--------------------------------------------------------------------
+ * We don't track frees of storage, we track the objects which own the
+ * storage and when there are no more objects in the first segment,
+ * it can be reclaimed.
+ * XXX: We could free the last allocation, but does that happen ?
+ */
+
+static void __match_proto__(storage_free_f)
+smp_free(struct storage *st)
+{
+
+	/* XXX */
+	(void)st;
+}
+
+
+/*--------------------------------------------------------------------*/
+
+const struct stevedore smp_stevedore = {
+	.magic	=	STEVEDORE_MAGIC,
+	.name	=	"persistent",
+	.init	=	smp_mgt_init,
+	.open	=	smp_open,
+	.close	=	smp_close,
+	.alloc	=	smp_alloc,
+	.allocobj =	smp_allocobj,
+	.free	=	smp_free,
+	.trim	=	smp_trim,
+};
+
+/*--------------------------------------------------------------------
+ * Persistence is a bear to test unadulterated, so we cheat by adding
+ * a cli command we can use to make it do tricks for us.
+ */
+
+static void
+debug_report_silo(struct cli *cli, const struct smp_sc *sc, int objs)
+{
+	struct smp_seg *sg;
+	struct objcore *oc;
+
+	VCLI_Out(cli, "Silo: %s (%s)\n",
+	    sc->stevedore->ident, sc->filename);
+	VTAILQ_FOREACH(sg, &sc->segments, list) {
+		VCLI_Out(cli, "  Seg: [0x%jx ... +0x%jx]\n",
+		   (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
+		if (sg == sc->cur_seg)
+			VCLI_Out(cli,
+			   "    Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
+			   (uintmax_t)(sc->next_bot),
+			   (uintmax_t)(sc->next_top),
+			   (uintmax_t)(sc->next_top - sc->next_bot));
+		VCLI_Out(cli, "    %u nobj, %u alloc, %u lobjlist, %u fixed\n",
+		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
+		if (objs) {
+			VTAILQ_FOREACH(oc, &sg->lru->lru_head, lru_list)
+				VCLI_Out(cli, "      OC %p\n", oc);
+		}
+	}
+}
+
+static void
+debug_persistent(struct cli *cli, const char * const * av, void *priv)
+{
+	struct smp_sc *sc;
+
+	(void)priv;
+
+	if (av[2] == NULL) {
+		VTAILQ_FOREACH(sc, &silos, list)
+			debug_report_silo(cli, sc, 0);
+		return;
+	}
+	VTAILQ_FOREACH(sc, &silos, list)
+		if (!strcmp(av[2], sc->stevedore->ident))
+			break;
+	if (sc == NULL) {
+		VCLI_Out(cli, "Silo <%s> not found\n", av[2]);
+		VCLI_SetResult(cli, CLIS_PARAM);
+		return;
+	}
+	if (av[3] == NULL) {
+		debug_report_silo(cli, sc, 0);
+		return;
+	}
+	Lck_Lock(&sc->mtx);
+	if (!strcmp(av[3], "sync")) {
+		smp_close_seg(sc, sc->cur_seg);
+		smp_new_seg(sc);
+	} else if (!strcmp(av[3], "dump")) {
+		debug_report_silo(cli, sc, 1);
+	} else {
+		VCLI_Out(cli, "Unknown operation\n");
+		VCLI_SetResult(cli, CLIS_PARAM);
+	}
+	Lck_Unlock(&sc->mtx);
+}
+
+static struct cli_proto debug_cmds[] = {
+        { "debug.persistent", "debug.persistent",
+                "Persistent debugging magic:\n"
+		"\tdebug.persistent [stevedore [cmd]]\n"
+		"With no cmd arg, a summary of the silo is returned.\n"
+		"Possible commands:\n"
+		"\tsync\tClose current segment, open a new one\n"
+		"\tdump\tinclude objcores in silo summary\n"
+		"",
+		0, 2, "d", debug_persistent },
+        { NULL }
+};
+
+/*--------------------------------------------------------------------*/
+
+void
+SMP_Init(void)
+{
+	CLI_AddFuncs(debug_cmds);
+}
+
+/*--------------------------------------------------------------------
+ * Pause until all silos have loaded.
+ */
+
+void
+SMP_Ready(void)
+{
+	struct smp_sc *sc;
+
+	ASSERT_CLI();
+	do {
+		VTAILQ_FOREACH(sc, &silos, list)
+			if (!(sc->flags & SMP_SC_LOADED))
+				break;
+		if (sc != NULL)
+			(void)sleep(1);
+	} while (sc != NULL);
+}
diff --git a/bin/varnishd/storage/storage_persistent.h b/bin/varnishd/storage/storage_persistent.h
new file mode 100644
index 0000000..84f3d21
--- /dev/null
+++ b/bin/varnishd/storage/storage_persistent.h
@@ -0,0 +1,219 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Persistent storage method
+ *
+ * XXX: Before we start the client or maybe after it stops, we should give the
+ * XXX: stevedores a chance to examine their storage for consistency.
+ *
+ * XXX: Do we ever free the LRU-lists ?
+ */
+
+/* Assert that the caller runs on the silo's private thread (sc->thread) */
+#define ASSERT_SILO_THREAD(sc) \
+    do {assert(pthread_self() == (sc)->thread);} while (0)
+
+/* Objcore needs fixup (see smp_oc_getobj) before first normal use */
+#define OC_F_NEEDFIXUP OC_F_PRIV
+
+/*
+ * Context for a signature.
+ *
+ * A signature is a sequence of bytes in the silo, signed by a SHA256 hash
+ * which follows the bytes.
+ *
+ * The context structure allows us to append to a signature without
+ * recalculating the entire SHA256 hash.
+ */
+
+struct smp_signctx {
+	struct smp_sign		*ss;		/* Signature in the silo */
+	struct SHA256Context	ctx;		/* Running SHA256 state */
+	uint32_t		unique;
+	const char		*id;		/* Signature identifier */
+};
+
+struct smp_sc;
+
+/* XXX: name confusion with on-media version ? */
+struct smp_seg {
+	unsigned		magic;
+#define SMP_SEG_MAGIC		0x45c61895
+
+	struct smp_sc		*sc;		/* Owning silo */
+	struct lru		*lru;		/* Per-segment LRU list */
+
+	VTAILQ_ENTRY(smp_seg)	list;		/* on smp_sc.smp_segments */
+
+	struct smp_segptr	p;		/* On-media offset/length */
+
+	unsigned		flags;
+#define SMP_SEG_MUSTLOAD	(1 << 0)	/* Objects not yet loaded */
+#define SMP_SEG_LOADED		(1 << 1)	/* smp_load_seg() completed */
+
+	uint32_t		nobj;		/* Number of objects */
+	uint32_t		nalloc;		/* Allocations */
+	uint32_t		nfixed;		/* How many fixed objects */
+
+	/* Only for open segment */
+	struct smp_object	*objs;		/* objdesc array */
+	struct smp_signctx	ctx[1];		/* Scratch signature context */
+};
+
+/* Head of a list of segments */
+VTAILQ_HEAD(smp_seghead, smp_seg);
+
+struct smp_sc {
+	unsigned		magic;
+#define SMP_SC_MAGIC		0x7b73af0a
+	struct stevedore	*parent;	/* Stevedore we belong to */
+
+	unsigned		flags;
+#define SMP_SC_LOADED		(1 << 0)	/* All segments loaded */
+
+	const struct stevedore	*stevedore;
+	int			fd;		/* Silo file descriptor */
+	const char		*filename;
+	off_t			mediasize;	/* Size of the silo file */
+	uintptr_t		align;		/* Allocation alignment */
+	uint32_t		granularity;
+	uint32_t		unique;
+
+	uint8_t			*base;		/* Start of mmap'ed silo */
+
+	struct smp_ident	*ident;		/* Silo identity block */
+
+	struct smp_seghead	segments;
+	struct smp_seg		*cur_seg;	/* Currently open segment */
+	uint64_t		next_bot;	/* next alloc address bottom */
+	uint64_t		next_top;	/* next alloc address top */
+
+	uint64_t		free_offset;	/* Next segment goes here */
+
+	pthread_t		thread;		/* Silo's private thread */
+
+	VTAILQ_ENTRY(smp_sc)	list;		/* On the global silo list */
+
+	/* Signature contexts; seg1/seg2 are the two segment-list copies */
+	struct smp_signctx	idn;
+	struct smp_signctx	ban1;
+	struct smp_signctx	ban2;
+	struct smp_signctx	seg1;
+	struct smp_signctx	seg2;
+
+	struct ban		*tailban;
+
+	struct lock		mtx;		/* Protects silo state */
+
+	/* Cleaner metrics, computed by smp_metrics() */
+
+	unsigned		min_nseg;	/* Minimum segment count */
+	unsigned		aim_nseg;	/* Target segment count */
+	unsigned		max_nseg;	/* Maximum segment count */
+
+	uint64_t		min_segl;	/* Minimum segment length */
+	uint64_t		aim_segl;	/* Target segment length */
+	uint64_t		max_segl;	/* Maximum segment length */
+
+	uint64_t		free_reserve;	/* Free-space reserve, bytes */
+};
+
+/*--------------------------------------------------------------------*/
+
+/* Pointer round up/down & assert (to sc->align) */
+#define PRNDN(sc, x)	((void*)RDN2((uintptr_t)(x), sc->align))
+#define PRNUP(sc, x)	((void*)RUP2((uintptr_t)(x), sc->align))
+#define PASSERTALIGN(sc, x)	assert(PRNDN(sc, x) == (x))
+
+/* Integer round up/down & assert (to sc->align) */
+#define IRNDN(sc, x)	RDN2(x, sc->align)
+#define IRNUP(sc, x)	RUP2(x, sc->align)
+#define IASSERTALIGN(sc, x)	assert(IRNDN(sc, x) == (x))
+
+/*--------------------------------------------------------------------*/
+
+/* Assert that a pointer lies inside the mmap'ed silo */
+#define ASSERT_PTR_IN_SILO(sc, ptr) \
+	assert((const void*)(ptr) >= (const void*)((sc)->base) && \
+	    (const void*)(ptr) < (const void *)((sc)->base + (sc)->mediasize))
+
+/*--------------------------------------------------------------------*/
+
+/* Start and end of the byte range covered by a signature context */
+#define SIGN_DATA(ctx)	((void *)((ctx)->ss + 1))
+#define SIGN_END(ctx)	((void *)((int8_t *)SIGN_DATA(ctx) + (ctx)->ss->length))
+
+/* storage_persistent_mgt.c */
+
+/* Configure a new silo from -spersistent arguments (mgt process) */
+void smp_mgt_init(struct stevedore *parent, int ac, char * const *av);
+
+/* storage_persistent_silo.c */
+
+/* Instantiate objcores for the objects recorded in a segment */
+void smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
+    struct smp_seg *sg);
+/* Open a fresh segment and make it the allocation target */
+void smp_new_seg(struct smp_sc *sc);
+/* Finish the open segment and write its signatures */
+void smp_close_seg(struct smp_sc *sc, struct smp_seg *sg);
+/* Bind an objcore to its segment and object index */
+void smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx);
+/* Write both copies of the segment list back to the silo */
+void smp_save_segs(struct smp_sc *sc);
+
+/* storage_persistent_subr.c */
+
+void smp_def_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
+    uint64_t off, const char *id);
+int smp_chk_sign(struct smp_signctx *ctx);
+void smp_append_sign(struct smp_signctx *ctx, const void *ptr, uint32_t len);
+void smp_reset_sign(struct smp_signctx *ctx);
+void smp_sync_sign(const struct smp_signctx *ctx);
+void smp_newsilo(struct smp_sc *sc);
+int smp_valid_silo(struct smp_sc *sc);
+
+/*--------------------------------------------------------------------
+ * Calculate payload of some stuff
+ */
+
+/*
+ * Payload length of a "stuff" area: distance to the next stuff offset
+ * minus the space reserved for the signature.
+ */
+static inline uint64_t
+smp_stuff_len(const struct smp_sc *sc, unsigned stuff)
+{
+	uint64_t l;
+
+	assert(stuff < SMP_END_STUFF);
+	l = sc->ident->stuff[stuff + 1] - sc->ident->stuff[stuff];
+	l -= SMP_SIGN_SPACE;
+	return (l);
+}
+
+/* First silo offset past the end of segment sg */
+static inline uint64_t
+smp_segend(const struct smp_seg *sg)
+{
+
+	return (sg->p.offset + sg->p.length);
+}
+
+/*
+ * Bytes still allocatable in the open segment, excluding the space
+ * reserved for the trailing signature.
+ */
+static inline uint64_t
+smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
+{
+
+	IASSERTALIGN(sc, sc->next_bot);
+	assert(sc->next_bot <= sc->next_top - IRNUP(sc, SMP_SIGN_SPACE));
+	assert(sc->next_bot >= sg->p.offset);
+	assert(sc->next_top < sg->p.offset + sg->p.length);
+	return ((sc->next_top - sc->next_bot) - IRNUP(sc, SMP_SIGN_SPACE));
+}
diff --git a/bin/varnishd/storage/storage_persistent_mgt.c b/bin/varnishd/storage/storage_persistent_mgt.c
new file mode 100644
index 0000000..1631ea6
--- /dev/null
+++ b/bin/varnishd/storage/storage_persistent_mgt.c
@@ -0,0 +1,205 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Persistent storage method
+ *
+ * XXX: Before we start the client or maybe after it stops, we should give the
+ * XXX: stevedores a chance to examine their storage for consistency.
+ *
+ * XXX: Do we ever free the LRU-lists ?
+ */
+
+#include "config.h"
+
+#include <sys/mman.h>
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "vsha256.h"
+
+#include "persistent.h"
+#include "storage/storage_persistent.h"
+
+#ifndef MAP_NOCORE
+#define MAP_NOCORE 0 /* XXX Linux */
+#endif
+
+#ifndef MAP_NOSYNC
+#define MAP_NOSYNC 0 /* XXX Linux */
+#endif
+
+/*--------------------------------------------------------------------
+ * Calculate cleaner metrics from silo dimensions
+ */
+
+static void
+smp_metrics(struct smp_sc *sc)
+{
+
+	/*
+	 * We do not want to lose too big chunks of the silo's
+	 * content when we are forced to clean a segment.
+	 *
+	 * For now insist that a segment covers no more than 1% of the silo.
+	 *
+	 * NOTE(review): min_nseg = 10 caps a segment at 10%, not the 1%
+	 * stated above -- confirm which is intended.
+	 *
+	 * XXX: This should possibly depend on the size of the silo so
+	 * XXX: trivially small silos do not run into trouble along
+	 * XXX: the lines of "one object per segment".
+	 */
+
+	sc->min_nseg = 10;
+	sc->max_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->min_nseg;
+
+	fprintf(stderr, "min_nseg = %u, max_segl = %ju\n",
+	    sc->min_nseg, (uintmax_t)sc->max_segl);
+
+	/*
+	 * The number of segments are limited by the size of the segment
+	 * table(s) and from that follows the minimum size of a segment.
+	 *
+	 * NOTE(review): dividing the SEG1 table length by min_nseg looks
+	 * odd; a per-entry size (sizeof segment pointer record) would be
+	 * expected here -- confirm.
+	 */
+
+	sc->max_nseg = smp_stuff_len(sc, SMP_SEG1_STUFF) / sc->min_nseg;
+	sc->min_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->max_nseg;
+
+	/* A segment must at least hold one struct object */
+	while (sc->min_segl < sizeof(struct object)) {
+		sc->max_nseg /= 2;
+		sc->min_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->max_nseg;
+	}
+
+	fprintf(stderr, "max_nseg = %u, min_segl = %ju\n",
+	    sc->max_nseg, (uintmax_t)sc->min_segl);
+
+	/*
+	 * Set our initial aim point at the exponential average of the
+	 * two extremes.
+	 *
+	 * XXX: This is a pretty arbitrary choice, but having no idea
+	 * XXX: object count, size distribution or ttl pattern at this
+	 * XXX: point, we have to do something.
+	 */
+
+	sc->aim_nseg =
+	   (unsigned) exp((log(sc->min_nseg) + log(sc->max_nseg))*.5);
+	sc->aim_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->aim_nseg;
+
+	fprintf(stderr, "aim_nseg = %u, aim_segl = %ju\n",
+	    sc->aim_nseg, (uintmax_t)sc->aim_segl);
+
+	/*
+	 * How much space in the free reserve pool ?
+	 */
+	sc->free_reserve = sc->aim_segl * 10;
+
+	fprintf(stderr, "free_reserve = %ju\n", (uintmax_t)sc->free_reserve);
+}
+
+/*--------------------------------------------------------------------
+ * Set up persistent storage silo in the master process.
+ */
+
+void
+smp_mgt_init(struct stevedore *parent, int ac, char * const *av)
+{
+	struct smp_sc		*sc;
+	struct smp_sign		sgn;
+	void *target;
+	int i;
+
+	ASSERT_MGT();
+
+	AZ(av[ac]);
+	/* Debug: dump the sizes of the on-media structures */
+#define SIZOF(foo)       fprintf(stderr, \
+    "sizeof(%s) = %zu = 0x%zx\n", #foo, sizeof(foo), sizeof(foo));
+	SIZOF(struct smp_ident);
+	SIZOF(struct smp_sign);
+	SIZOF(struct smp_segptr);
+	SIZOF(struct smp_object);
+#undef SIZOF
+
+	/* See comments in persistent.h */
+	assert(sizeof(struct smp_ident) == SMP_IDENT_SIZE);
+
+	/* Allocate softc */
+	ALLOC_OBJ(sc, SMP_SC_MAGIC);
+	XXXAN(sc);
+	sc->parent = parent;
+	sc->fd = -1;
+	VTAILQ_INIT(&sc->segments);
+
+	/* Argument processing: av[0] is the file, av[1] the size spec */
+	if (ac != 2)
+		ARGV_ERR("(-spersistent) wrong number of arguments\n");
+
+	i = STV_GetFile(av[0], &sc->fd, &sc->filename, "-spersistent");
+	if (i == 2)
+		ARGV_ERR("(-spersistent) need filename (not directory)\n");
+
+	sc->align = sizeof(void*) * 2;
+	sc->granularity = getpagesize();
+	sc->mediasize = STV_FileSize(sc->fd, av[1], &sc->granularity,
+	    "-spersistent");
+
+	AZ(ftruncate(sc->fd, sc->mediasize));
+
+	/*
+	 * Try to determine correct mmap address: an existing silo records
+	 * the address it was mapped at, so objects' internal pointers
+	 * stay valid when we map it at the same place again.
+	 */
+	i = read(sc->fd, &sgn, sizeof sgn);
+	assert(i == sizeof sgn);
+	if (!strcmp(sgn.ident, "SILO"))
+		target = (void*)(uintptr_t)sgn.mapped;
+	else
+		target = NULL;
+
+	sc->base = mmap(target, sc->mediasize, PROT_READ|PROT_WRITE,
+	    MAP_NOCORE | MAP_NOSYNC | MAP_SHARED, sc->fd, 0);
+
+	if (sc->base == MAP_FAILED)
+		ARGV_ERR("(-spersistent) failed to mmap (%s)\n",
+		    strerror(errno));
+
+	smp_def_sign(sc, &sc->idn, 0, "SILO");
+	sc->ident = SIGN_DATA(&sc->idn);
+
+	/* Reformat the silo if it does not validate */
+	i = smp_valid_silo(sc);
+	if (i) {
+		printf("Warning SILO (%s) not reloaded (reason=%d)\n",
+		    sc->filename, i);
+		smp_newsilo(sc);
+	}
+	AZ(smp_valid_silo(sc));
+
+	smp_metrics(sc);
+
+	parent->priv = sc;
+
+	/* XXX: only for sendfile I guess... */
+	mgt_child_inherit(sc->fd, "storage_persistent");
+}
diff --git a/bin/varnishd/storage/storage_persistent_silo.c b/bin/varnishd/storage/storage_persistent_silo.c
new file mode 100644
index 0000000..5393546
--- /dev/null
+++ b/bin/varnishd/storage/storage_persistent_silo.c
@@ -0,0 +1,524 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Persistent storage method
+ *
+ * XXX: Before we start the client or maybe after it stops, we should give the
+ * XXX: stevedores a chance to examine their storage for consistency.
+ *
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "hash_slinger.h"
+#include "vsha256.h"
+#include "vtim.h"
+
+#include "persistent.h"
+#include "storage/storage_persistent.h"
+
+/*--------------------------------------------------------------------
+ * Write the segmentlist back to the silo.
+ *
+ * We write the first copy, sync it synchronously, then write the
+ * second copy and sync it synchronously.
+ *
+ * Provided the kernel doesn't lie, that means we will always have
+ * at least one valid copy in the silo.
+ */
+
+/*
+ * Write one copy of the segment list into the signature area ctx,
+ * sign it and sync it to media.  Caller holds sc->mtx.
+ */
+static void
+smp_save_seg(const struct smp_sc *sc, struct smp_signctx *ctx)
+{
+	struct smp_segptr *ss;
+	struct smp_seg *sg;
+	uint64_t length;
+
+	Lck_AssertHeld(&sc->mtx);
+	smp_reset_sign(ctx);
+	ss = SIGN_DATA(ctx);
+	length = 0;
+	/* Serialize one smp_segptr per segment, in list order */
+	VTAILQ_FOREACH(sg, &sc->segments, list) {
+		assert(sg->p.offset < sc->mediasize);
+		assert(sg->p.offset + sg->p.length <= sc->mediasize);
+		*ss = sg->p;
+		ss++;
+		length += sizeof *ss;
+	}
+	smp_append_sign(ctx, SIGN_DATA(ctx), length);
+	smp_sync_sign(ctx);
+}
+
+void
+smp_save_segs(struct smp_sc *sc)
+{
+	struct smp_seg *sg, *sg2;
+
+	Lck_AssertHeld(&sc->mtx);
+
+	/*
+	 * Remove empty segments from the front of the list
+	 * before we write the segments to disk.
+	 * The currently open segment is kept even when empty.
+	 */
+	VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
+		if (sg->nobj > 0)
+			break;
+		if (sg == sc->cur_seg)
+			continue;
+		VTAILQ_REMOVE(&sc->segments, sg, list);
+		LRU_Free(sg->lru);
+		FREE_OBJ(sg);
+	}
+	/* Write both copies, each synced before the next (see above) */
+	smp_save_seg(sc, &sc->seg1);
+	smp_save_seg(sc, &sc->seg2);
+}
+
+/*--------------------------------------------------------------------
+ * Load segments
+ *
+ * The overall objective is to register the existence of an object, based
+ * only on the minimally sized struct smp_object, without causing the
+ * main object to be faulted in.
+ *
+ * XXX: We can test this by mprotecting the main body of the segment
+ * XXX: until the first fixup happens, or even just over this loop,
+ * XXX: However: this requires that the smp_objects start further
+ * XXX: into the segment than a page so that they do not get hit
+ * XXX: by the protection.
+ */
+
+void
+smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
+    struct smp_seg *sg)
+{
+	struct smp_object *so;
+	struct objcore *oc;
+	uint32_t no;
+	double t_now = VTIM_real();
+	struct smp_signctx ctx[1];
+
+	ASSERT_SILO_THREAD(sc);
+	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+	CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
+	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
+	assert(sg->flags & SMP_SEG_MUSTLOAD);
+	sg->flags &= ~SMP_SEG_MUSTLOAD;
+	AN(sg->p.offset);
+	if (sg->p.objlist == 0)
+		return;
+	/* Verify the SEGHEAD signature before trusting the contents */
+	smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
+	if (smp_chk_sign(ctx))
+		return;
+
+	/* test SEGTAIL */
+	/* test OBJIDX */
+	so = (void*)(sc->base + sg->p.objlist);
+	sg->objs = so;
+	no = sg->p.lobjlist;
+	/* Clear the bogus "hold" count */
+	sg->nobj = 0;
+	/* Walk the object descriptors; "no" counts down from lobjlist
+	 * and doubles as the objidx passed to smp_init_oc() */
+	for (;no > 0; so++,no--) {
+		if (so->ttl == 0 || so->ttl < t_now)
+			continue;	/* Dead or expired object */
+		HSH_Prealloc(sp);
+		oc = sp->wrk->nobjcore;
+		/* Needs fixup on first access; don't disturb LRU order */
+		oc->flags |= OC_F_NEEDFIXUP | OC_F_LRUDONTMOVE;
+		oc->flags &= ~OC_F_BUSY;
+		smp_init_oc(oc, sg, no);
+		oc->ban = BAN_RefBan(oc, so->ban, sc->tailban);
+		memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
+		(void)HSH_Insert(sp);
+		AZ(sp->wrk->nobjcore);
+		EXP_Inject(oc, sg->lru, so->ttl);
+		sg->nobj++;
+	}
+	WRK_SumStat(sp->wrk);
+	sg->flags |= SMP_SEG_LOADED;
+}
+
+/*--------------------------------------------------------------------
+ * Create a new segment
+ */
+
+void
+smp_new_seg(struct smp_sc *sc)
+{
+	struct smp_seg *sg, *sg2;
+
+	Lck_AssertHeld(&sc->mtx);
+	ALLOC_OBJ(sg, SMP_SEG_MAGIC);
+	AN(sg);
+	sg->sc = sc;
+	sg->lru = LRU_Alloc();
+	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
+
+	/* XXX: find where it goes in silo */
+
+	sg->p.offset = sc->free_offset;
+	/* XXX: align */
+	assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
+	assert(sg->p.offset < sc->mediasize);
+
+	sg->p.length = sc->aim_segl;
+	sg->p.length &= ~7;
+
+	/* Wrap around to the start of the space area if we hit the end */
+	if (smp_segend(sg) > sc->mediasize) {
+		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
+		sg->p.offset = sc->free_offset;
+		sg2 = VTAILQ_FIRST(&sc->segments);
+		if (smp_segend(sg) > sg2->p.offset) {
+			printf("Out of space in persistent silo\n");
+			printf("Committing suicide, restart will make space\n");
+			exit (0);
+		}
+	}
+
+
+	assert(smp_segend(sg) <= sc->mediasize);
+
+	/* Must not overrun the oldest live segment after wrapping */
+	sg2 = VTAILQ_FIRST(&sc->segments);
+	if (sg2 != NULL && sg2->p.offset > sc->free_offset) {
+		if (smp_segend(sg) > sg2->p.offset) {
+			printf("Out of space in persistent silo\n");
+			printf("Committing suicide, restart will make space\n");
+			exit (0);
+		}
+		assert(smp_segend(sg) <= sg2->p.offset);
+	}
+
+	sg->p.offset = IRNUP(sc, sg->p.offset);
+	sg->p.length = IRNDN(sc, sg->p.length);
+	sc->free_offset = sg->p.offset + sg->p.length;
+
+	VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
+
+	/* Neuter the new segment in case there is an old one there */
+	AN(sg->p.offset);
+	smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
+	smp_reset_sign(sg->ctx);
+	smp_sync_sign(sg->ctx);
+
+	/* Set up our allocation points: bottom grows up, top grows down */
+	sc->cur_seg = sg;
+	sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
+	sc->next_top = smp_segend(sg);
+	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
+	IASSERTALIGN(sc, sc->next_bot);
+	IASSERTALIGN(sc, sc->next_top);
+	sg->objs = (void*)(sc->base + sc->next_top);
+}
+
+/*--------------------------------------------------------------------
+ * Close a segment
+ */
+
+void
+smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
+{
+	uint64_t left, dst, len;
+	void *dp;
+
+	Lck_AssertHeld(&sc->mtx);
+
+	assert(sg == sc->cur_seg);
+	AN(sg->p.offset);
+	sc->cur_seg = NULL;
+
+	if (sg->nalloc == 0) {
+		/* XXX: if segment is empty, delete instead */
+		VTAILQ_REMOVE(&sc->segments, sg, list);
+		free(sg);
+		return;
+	}
+
+	/*
+	 * If there is enough space left, that we can move the smp_objects
+	 * down without overwriting the present copy, we will do so to
+	 * compact the segment.
+	 */
+	left = smp_spaceleft(sc, sg);
+	len = sizeof(struct smp_object) * sg->p.lobjlist;
+	if (len < left) {
+		dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
+		dp = sc->base + dst;
+		assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
+		memcpy(dp, sg->objs, len);
+		sc->next_top = dst;
+		sg->objs = dp;
+		/* Shrink the segment to cover only what is used */
+		sg->p.length = (sc->next_top - sg->p.offset)
+		     + len + IRNUP(sc, SMP_SIGN_SPACE);
+		(void)smp_spaceleft(sc, sg);	/* for the asserts */
+
+	}
+
+	/* Update the segment header */
+	sg->p.objlist = sc->next_top;
+
+	/* Write the (empty) OBJIDX signature */
+	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
+	assert(sc->next_top >= sc->next_bot);
+	smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
+	smp_reset_sign(sg->ctx);
+	smp_sync_sign(sg->ctx);
+
+	/* Write the (empty) SEGTAIL signature */
+	smp_def_sign(sc, sg->ctx,
+	    sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
+	smp_reset_sign(sg->ctx);
+	smp_sync_sign(sg->ctx);
+
+	/* Save segment list */
+	smp_save_segs(sc);
+	sc->free_offset = smp_segend(sg);
+}
+
+
+/*---------------------------------------------------------------------
+ */
+
+/*
+ * Map an objcore's priv2 index to its smp_object slot.  Indices run
+ * from lobjlist down to 1; priv2 == lobjlist maps to objs[0].
+ */
+static struct smp_object *
+smp_find_so(const struct smp_seg *sg, unsigned priv2)
+{
+	struct smp_object *so;
+
+	assert(priv2 > 0);
+	assert(priv2 <= sg->p.lobjlist);
+	so = &sg->objs[sg->p.lobjlist - priv2];
+	return (so);
+}
+
+/*---------------------------------------------------------------------
+ * Check if a given storage structure is valid to use
+ */
+
+/*
+ * Validate a storage structure recovered from the silo.
+ * Returns 0 if usable, otherwise a bit code identifying the failure.
+ */
+static int
+smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
+    const struct storage *st)
+{
+	struct smp_seg *sg2;
+	const uint8_t *pst;
+	uint64_t o;
+
+	(void)sg;		/* XXX: faster: Start search from here */
+	pst = (const void *)st;
+
+	if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
+		return (0x01);		/* Before silo payload start */
+	if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
+		return (0x02);		/* After silo end */
+
+	o = pst - sc->base;
+
+	/* Find which segment contains the storage structure */
+	VTAILQ_FOREACH(sg2, &sc->segments, list)
+		if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
+			break;
+	if (sg2 == NULL)
+		return (0x04);		/* No claiming segment */
+	if (!(sg2->flags & SMP_SEG_LOADED))
+		return (0x08);		/* Claiming segment not loaded */
+
+	/* It is now safe to access the storage structure */
+	if (st->magic != STORAGE_MAGIC)
+		return (0x10);		/* Not enough magic */
+
+	if (o + st->space >= sg2->p.objlist)
+		return (0x20);		/* Allocation not inside segment */
+
+	if (st->len > st->space)
+		return (0x40);		/* Plain bad... */
+
+	/*
+	 * XXX: We could patch up st->stevedore and st->priv here
+	 * XXX: but if things go right, we will never need them.
+	 */
+	return (0);
+}
+
+/*---------------------------------------------------------------------
+ * objcore methods for persistent objects
+ */
+
+/*
+ * Resolve an objcore to its struct object, performing the one-time
+ * fixup (validation of storage and statistics) on first access.
+ * May be called with wrk == NULL only after fixup has happened.
+ */
+static struct object *
+smp_oc_getobj(struct worker *wrk, struct objcore *oc)
+{
+	struct object *o;
+	struct smp_seg *sg;
+	struct smp_object *so;
+	struct storage *st;
+	uint64_t l;
+	int bad;
+
+	/* Some calls are direct, but they should match anyway */
+	assert(oc->methods->getobj == smp_oc_getobj);
+
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	if (wrk == NULL)
+		AZ(oc->flags & OC_F_NEEDFIXUP);
+
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	so = smp_find_so(sg, oc->priv2);
+
+	o = (void*)(sg->sc->base + so->ptr);
+	/*
+	 * The object may not be in this segment since we allocate it
+	 * in a separate operation from the smp_object.  We could check
+	 * that it is in a later segment, but that would be complicated.
+	 * XXX: For now, be happy if it is inside the silo
+	 */
+	ASSERT_PTR_IN_SILO(sg->sc, o);
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+
+	/*
+	 * If this flag is not set, it will not be, and the lock is not
+	 * needed to test it.
+	 */
+	if (!(oc->flags & OC_F_NEEDFIXUP))
+		return (o);
+
+	AN(wrk);
+	Lck_Lock(&sg->sc->mtx);
+	/* Check again, we might have raced. */
+	if (oc->flags & OC_F_NEEDFIXUP) {
+		/* We trust caller to have a refcnt for us */
+		o->objcore = oc;
+
+		/* Validate all storage chunks and their total length */
+		bad = 0;
+		l = 0;
+		VTAILQ_FOREACH(st, &o->store, list) {
+			bad |= smp_loaded_st(sg->sc, sg, st);
+			if (bad)
+				break;
+			l += st->len;
+		}
+		if (l != o->len)
+			bad |= 0x100;
+
+		/* A bad object is expired rather than served */
+		if(bad) {
+			EXP_Set_ttl(&o->exp, -1);
+			so->ttl = 0;
+		}
+
+		sg->nfixed++;
+		wrk->stats.n_object++;
+		wrk->stats.n_vampireobject--;
+		oc->flags &= ~OC_F_NEEDFIXUP;
+	}
+	Lck_Unlock(&sg->sc->mtx);
+	EXP_Rearm(o);
+	return (o);
+}
+
+/*
+ * Push updated ban time and ttl/grace back into the on-media
+ * object descriptor.
+ */
+static void
+smp_oc_updatemeta(struct objcore *oc)
+{
+	struct object *o;
+	struct smp_seg *sg;
+	struct smp_object *so;
+	double mttl;
+
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	o = smp_oc_getobj(NULL, oc);
+	AN(o);
+
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
+	so = smp_find_so(sg, oc->priv2);
+
+	mttl = EXP_Grace(NULL, o);
+
+	if (sg == sg->sc->cur_seg) {
+		/* Lock necessary, we might race close_seg */
+		Lck_Lock(&sg->sc->mtx);
+		so->ban = BAN_Time(oc->ban);
+		so->ttl = mttl;
+		Lck_Unlock(&sg->sc->mtx);
+	} else {
+		so->ban = BAN_Time(oc->ban);
+		so->ttl = mttl;
+	}
+}
+
+/* Mark an object dead in its segment's descriptor and drop the counts */
+static void __match_proto__()
+smp_oc_freeobj(struct objcore *oc)
+{
+	struct smp_seg *sg;
+	struct smp_object *so;
+
+	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	so = smp_find_so(sg, oc->priv2);
+
+	Lck_Lock(&sg->sc->mtx);
+	/* Zero ttl and ptr so the object is skipped on reload */
+	so->ttl = 0;
+	so->ptr = 0;
+
+	assert(sg->nobj > 0);
+	assert(sg->nfixed > 0);
+	sg->nobj--;
+	sg->nfixed--;
+
+	Lck_Unlock(&sg->sc->mtx);
+}
+
+/*--------------------------------------------------------------------
+ * Find the per-segment lru list for this object
+ */
+
+static struct lru *
+smp_oc_getlru(const struct objcore *oc)
+{
+	struct smp_seg *sg;
+
+	/* Each segment carries its own LRU list */
+	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+	return (sg->lru);
+}
+
+/* objcore method vector installed by smp_init_oc() */
+static struct objcore_methods smp_oc_methods = {
+	.getobj =		smp_oc_getobj,
+	.updatemeta =		smp_oc_updatemeta,
+	.freeobj =		smp_oc_freeobj,
+	.getlru =		smp_oc_getlru,
+};
+
+/*--------------------------------------------------------------------*/
+
+void
+smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
+{
+
+	/* priv/priv2 locate the smp_object via smp_find_so() */
+	oc->priv = sg;
+	oc->priv2 = objidx;
+	oc->methods = &smp_oc_methods;
+}
diff --git a/bin/varnishd/storage/storage_persistent_subr.c b/bin/varnishd/storage/storage_persistent_subr.c
new file mode 100644
index 0000000..2066e72
--- /dev/null
+++ b/bin/varnishd/storage/storage_persistent_subr.c
@@ -0,0 +1,302 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Persistent storage method
+ *
+ * XXX: Before we start the client or maybe after it stops, we should give the
+ * XXX: stevedores a chance to examine their storage for consistency.
+ *
+ * XXX: Do we ever free the LRU-lists ?
+ */
+
+#include "config.h"
+
+#include <sys/mman.h>
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+#include "vsha256.h"
+
+#include "persistent.h"
+#include "storage/storage_persistent.h"
+
+/*--------------------------------------------------------------------
+ * SIGNATURE functions
+ * The signature is SHA256 over:
+ *    1. The smp_sign struct up to but not including the length field.
+ *    2. smp_sign->length bytes, starting after the smp_sign structure
+ *    3. The smp-sign->length field.
+ * The signature is stored after the byte-range from step 2.
+ */
+
+/*--------------------------------------------------------------------
+ * Define a signature by location and identifier.
+ */
+
+void
+smp_def_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
+    uint64_t off, const char *id)
+{
+
+	AZ(off & 7);			/* Alignment */
+	assert(strlen(id) < sizeof ctx->ss->ident);
+
+	memset(ctx, 0, sizeof *ctx);
+	ctx->ss = (void*)(sc->base + off);
+	ctx->unique = sc->unique;
+	ctx->id = id;
+}
+
+/*--------------------------------------------------------------------
+ * Check that a signature is good, leave state ready for append
+ */
+int
+smp_chk_sign(struct smp_signctx *ctx)
+{
+	struct SHA256Context cx;
+	unsigned char sign[SHA256_LEN];
+	int r = 0;
+
+	if (strncmp(ctx->id, ctx->ss->ident, sizeof ctx->ss->ident))
+		r = 1;
+	else if (ctx->unique != ctx->ss->unique)
+		r = 2;
+	else if ((uintptr_t)ctx->ss != ctx->ss->mapped)
+		r = 3;
+	else {
+		SHA256_Init(&ctx->ctx);
+		SHA256_Update(&ctx->ctx, ctx->ss,
+		    offsetof(struct smp_sign, length));
+		SHA256_Update(&ctx->ctx, SIGN_DATA(ctx), ctx->ss->length);
+		cx = ctx->ctx;
+		SHA256_Update(&cx, &ctx->ss->length, sizeof(ctx->ss->length));
+		SHA256_Final(sign, &cx);
+		if (memcmp(sign, SIGN_END(ctx), sizeof sign))
+			r = 4;
+	}
+	if (r) {
+		fprintf(stderr, "CHK(%p %s %p %s) = %d\n",
+		    ctx, ctx->id, ctx->ss,
+		    r > 1 ? ctx->ss->ident : "<invalid>", r);
+	}
+	return (r);
+}
+
+/*--------------------------------------------------------------------
+ * Append data to a signature
+ */
+void
+smp_append_sign(struct smp_signctx *ctx, const void *ptr, uint32_t len)
+{
+	struct SHA256Context cx;
+	unsigned char sign[SHA256_LEN];
+
+	if (len != 0) {
+		SHA256_Update(&ctx->ctx, ptr, len);
+		ctx->ss->length += len;
+	}
+	cx = ctx->ctx;
+	SHA256_Update(&cx, &ctx->ss->length, sizeof(ctx->ss->length));
+	SHA256_Final(sign, &cx);
+	memcpy(SIGN_END(ctx), sign, sizeof sign);
+XXXAZ(smp_chk_sign(ctx));
+}
+
+/*--------------------------------------------------------------------
+ * Reset a signature to empty, prepare for appending.
+ */
+
+void
+smp_reset_sign(struct smp_signctx *ctx)
+{
+
+	memset(ctx->ss, 0, sizeof *ctx->ss);
+	strcpy(ctx->ss->ident, ctx->id);
+	ctx->ss->unique = ctx->unique;
+	ctx->ss->mapped = (uintptr_t)ctx->ss;
+	SHA256_Init(&ctx->ctx);
+	SHA256_Update(&ctx->ctx, ctx->ss,
+	    offsetof(struct smp_sign, length));
+	smp_append_sign(ctx, NULL, 0);
+}
+
+/*--------------------------------------------------------------------
+ * Force a write of a signature block to the backing store.
+ */
+
+void
+smp_sync_sign(const struct smp_signctx *ctx)
+{
+	int i;
+
+	/* XXX: round to pages */
+	i = msync((void*)ctx->ss, ctx->ss->length + SHA256_LEN, MS_SYNC);
+	if (i && 0)
+		fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
+		    ctx->ss, ctx->id, i, strerror(errno));
+}
+
+/*--------------------------------------------------------------------
+ * Create and force a new signature to backing store
+ */
+
+static void
+smp_new_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
+    uint64_t off, const char *id)
+{
+	smp_def_sign(sc, ctx, off, id);
+	smp_reset_sign(ctx);
+	smp_sync_sign(ctx);
+}
+
+/*--------------------------------------------------------------------
+ * Initialize a Silo with a valid but empty structure.
+ *
+ * XXX: more intelligent sizing of things.
+ */
+
+void
+smp_newsilo(struct smp_sc *sc)
+{
+	struct smp_ident	*si;
+
+	ASSERT_MGT();
+	assert(strlen(SMP_IDENT_STRING) < sizeof si->ident);
+
+	/* Choose a new random number */
+	sc->unique = random();
+
+	smp_reset_sign(&sc->idn);
+	si = sc->ident;
+
+	memset(si, 0, sizeof *si);
+	strcpy(si->ident, SMP_IDENT_STRING);
+	si->byte_order = 0x12345678;
+	si->size = sizeof *si;
+	si->major_version = 2;
+	si->unique = sc->unique;
+	si->mediasize = sc->mediasize;
+	si->granularity = sc->granularity;
+	/*
+	 * Aim for cache-line-width
+	 */
+	si->align = sizeof(void*) * 2;
+	sc->align = si->align;
+
+	si->stuff[SMP_BAN1_STUFF] = sc->granularity;
+	si->stuff[SMP_BAN2_STUFF] = si->stuff[SMP_BAN1_STUFF] + 1024*1024;
+	si->stuff[SMP_SEG1_STUFF] = si->stuff[SMP_BAN2_STUFF] + 1024*1024;
+	si->stuff[SMP_SEG2_STUFF] = si->stuff[SMP_SEG1_STUFF] + 1024*1024;
+	si->stuff[SMP_SPC_STUFF] = si->stuff[SMP_SEG2_STUFF] + 1024*1024;
+	si->stuff[SMP_END_STUFF] = si->mediasize;
+	assert(si->stuff[SMP_SPC_STUFF] < si->stuff[SMP_END_STUFF]);
+
+	smp_new_sign(sc, &sc->ban1, si->stuff[SMP_BAN1_STUFF], "BAN 1");
+	smp_new_sign(sc, &sc->ban2, si->stuff[SMP_BAN2_STUFF], "BAN 2");
+	smp_new_sign(sc, &sc->seg1, si->stuff[SMP_SEG1_STUFF], "SEG 1");
+	smp_new_sign(sc, &sc->seg2, si->stuff[SMP_SEG2_STUFF], "SEG 2");
+
+	smp_append_sign(&sc->idn, si, sizeof *si);
+	smp_sync_sign(&sc->idn);
+}
+
+/*--------------------------------------------------------------------
+ * Check if a silo is valid.
+ */
+
+int
+smp_valid_silo(struct smp_sc *sc)
+{
+	struct smp_ident	*si;
+	int i, j;
+
+	assert(strlen(SMP_IDENT_STRING) < sizeof si->ident);
+
+	i = smp_chk_sign(&sc->idn);
+	if (i)
+		return (i);
+
+	si = sc->ident;
+	if (strcmp(si->ident, SMP_IDENT_STRING))
+		return (12);
+	if (si->byte_order != 0x12345678)
+		return (13);
+	if (si->size != sizeof *si)
+		return (14);
+	if (si->major_version != 2)
+		return (15);
+	if (si->mediasize != sc->mediasize)
+		return (17);
+	if (si->granularity != sc->granularity)
+		return (18);
+	if (si->align < sizeof(void*))
+		return (19);
+	if (!PWR2(si->align))
+		return (20);
+	sc->align = si->align;
+	sc->unique = si->unique;
+
+	/* XXX: Sanity check stuff[6] */
+
+	assert(si->stuff[SMP_BAN1_STUFF] > sizeof *si + SHA256_LEN);
+	assert(si->stuff[SMP_BAN2_STUFF] > si->stuff[SMP_BAN1_STUFF]);
+	assert(si->stuff[SMP_SEG1_STUFF] > si->stuff[SMP_BAN2_STUFF]);
+	assert(si->stuff[SMP_SEG2_STUFF] > si->stuff[SMP_SEG1_STUFF]);
+	assert(si->stuff[SMP_SPC_STUFF] > si->stuff[SMP_SEG2_STUFF]);
+	assert(si->stuff[SMP_END_STUFF] == sc->mediasize);
+
+	assert(smp_stuff_len(sc, SMP_SEG1_STUFF) > 65536);
+	assert(smp_stuff_len(sc, SMP_SEG1_STUFF) ==
+	  smp_stuff_len(sc, SMP_SEG2_STUFF));
+
+	assert(smp_stuff_len(sc, SMP_BAN1_STUFF) > 65536);
+	assert(smp_stuff_len(sc, SMP_BAN1_STUFF) ==
+	  smp_stuff_len(sc, SMP_BAN2_STUFF));
+
+	smp_def_sign(sc, &sc->ban1, si->stuff[SMP_BAN1_STUFF], "BAN 1");
+	smp_def_sign(sc, &sc->ban2, si->stuff[SMP_BAN2_STUFF], "BAN 2");
+	smp_def_sign(sc, &sc->seg1, si->stuff[SMP_SEG1_STUFF], "SEG 1");
+	smp_def_sign(sc, &sc->seg2, si->stuff[SMP_SEG2_STUFF], "SEG 2");
+
+	/* We must have one valid BAN table */
+	i = smp_chk_sign(&sc->ban1);
+	j = smp_chk_sign(&sc->ban2);
+	if (i && j)
+		return (100 + i * 10 + j);
+
+	/* We must have one valid SEG table */
+	i = smp_chk_sign(&sc->seg1);
+	j = smp_chk_sign(&sc->seg2);
+	if (i && j)
+		return (200 + i * 10 + j);
+	return (0);
+}
diff --git a/bin/varnishd/storage/storage_synth.c b/bin/varnishd/storage/storage_synth.c
new file mode 100644
index 0000000..0519738
--- /dev/null
+++ b/bin/varnishd/storage/storage_synth.c
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2008-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Storage method for synthetic content, based on vsb.
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+
+static struct lock		sms_mtx;
+
+static void
+sms_free(struct storage *sto)
+{
+
+	CHECK_OBJ_NOTNULL(sto, STORAGE_MAGIC);
+	Lck_Lock(&sms_mtx);
+	VSC_C_main->sms_nobj--;
+	VSC_C_main->sms_nbytes -= sto->len;
+	VSC_C_main->sms_bfree += sto->len;
+	Lck_Unlock(&sms_mtx);
+	VSB_delete(sto->priv);
+	free(sto);
+}
+
+void
+SMS_Init(void)
+{
+
+	Lck_New(&sms_mtx, lck_sms);
+}
+
+static struct stevedore sms_stevedore = {
+	.magic	=	STEVEDORE_MAGIC,
+	.name	=	"synth",
+	.free	=	sms_free,
+};
+
+struct vsb *
+SMS_Makesynth(struct object *obj)
+{
+	struct storage *sto;
+	struct vsb *vsb;
+
+	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
+	STV_Freestore(obj);
+	obj->len = 0;
+
+	Lck_Lock(&sms_mtx);
+	VSC_C_main->sms_nreq++;
+	VSC_C_main->sms_nobj++;
+	Lck_Unlock(&sms_mtx);
+
+	sto = calloc(sizeof *sto, 1);
+	XXXAN(sto);
+	vsb = VSB_new_auto();
+	XXXAN(vsb);
+	sto->priv = vsb;
+	sto->len = 0;
+	sto->space = 0;
+#ifdef SENDFILE_WORKS
+	sto->fd = -1;
+#endif
+	sto->stevedore = &sms_stevedore;
+	sto->magic = STORAGE_MAGIC;
+
+	VTAILQ_INSERT_TAIL(&obj->store, sto, list);
+	return (vsb);
+}
+
+void
+SMS_Finish(struct object *obj)
+{
+	struct storage *sto;
+	struct vsb *vsb;
+
+	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
+	sto = VTAILQ_FIRST(&obj->store);
+	assert(sto->stevedore == &sms_stevedore);
+	vsb = sto->priv;
+	AZ(VSB_finish(vsb));
+
+	sto->ptr = (void*)VSB_data(vsb);
+	sto->len = VSB_len(vsb);
+	sto->space = VSB_len(vsb);
+	obj->len = sto->len;
+	Lck_Lock(&sms_mtx);
+	VSC_C_main->sms_nbytes += sto->len;
+	VSC_C_main->sms_balloc += sto->len;
+	Lck_Unlock(&sms_mtx);
+}
diff --git a/bin/varnishd/storage/storage_umem.c b/bin/varnishd/storage/storage_umem.c
new file mode 100644
index 0000000..78110d4
--- /dev/null
+++ b/bin/varnishd/storage/storage_umem.c
@@ -0,0 +1,166 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Storage method based on umem_alloc(3MALLOC)
+ */
+
+#include "config.h"
+
+#ifdef HAVE_LIBUMEM
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <umem.h>
+
+#include "cache.h"
+#include "storage/storage.h"
+
+static size_t			smu_max = SIZE_MAX;
+static MTX			smu_mtx;
+
+struct smu {
+	struct storage		s;
+	size_t			sz;
+};
+
+static struct storage *
+smu_alloc(struct stevedore *st, size_t size)
+{
+	struct smu *smu;
+
+	Lck_Lock(&smu_mtx);
+	VSC_C_main->sma_nreq++;
+	if (VSC_C_main->sma_nbytes + size > smu_max)
+		size = 0;
+	else {
+		VSC_C_main->sma_nobj++;
+		VSC_C_main->sma_nbytes += size;
+		VSC_C_main->sma_balloc += size;
+	}
+	Lck_Unlock(&smu_mtx);
+
+	if (size == 0)
+		return (NULL);
+
+	smu = umem_zalloc(sizeof *smu, UMEM_DEFAULT);
+	if (smu == NULL)
+		return (NULL);
+	smu->sz = size;
+	smu->s.priv = smu;
+	smu->s.ptr = umem_alloc(size, UMEM_DEFAULT);
+	XXXAN(smu->s.ptr);
+	smu->s.len = 0;
+	smu->s.space = size;
+	smu->s.fd = -1;
+	smu->s.stevedore = st;
+	smu->s.magic = STORAGE_MAGIC;
+	return (&smu->s);
+}
+
+static void
+smu_free(struct storage *s)
+{
+	struct smu *smu;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	smu = s->priv;
+	assert(smu->sz == smu->s.space);
+	Lck_Lock(&smu_mtx);
+	VSC_C_main->sma_nobj--;
+	VSC_C_main->sma_nbytes -= smu->sz;
+	VSC_C_main->sma_bfree += smu->sz;
+	Lck_Unlock(&smu_mtx);
+	umem_free(smu->s.ptr, smu->s.space);
+	umem_free(smu, sizeof *smu);
+}
+
+static void
+smu_trim(const struct storage *s, size_t size)
+{
+	struct smu *smu;
+	void *p;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	smu = s->priv;
+	assert(smu->sz == smu->s.space);
+	if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
+		memcpy(p, smu->s.ptr, size);
+		umem_free(smu->s.ptr, smu->s.space);
+		Lck_Lock(&smu_mtx);
+		VSC_C_main->sma_nbytes -= (smu->sz - size);
+		VSC_C_main->sma_bfree += smu->sz - size;
+		smu->sz = size;
+		Lck_Unlock(&smu_mtx);
+		smu->s.ptr = p;
+		smu->s.space = size;
+	}
+}
+
+static void
+smu_init(struct stevedore *parent, int ac, char * const *av)
+{
+	const char *e;
+	uintmax_t u;
+
+	(void)parent;
+
+	AZ(av[ac]);
+	if (ac > 1)
+		ARGV_ERR("(-sumem) too many arguments\n");
+
+	if (ac == 0 || *av[0] == '\0')
+		 return;
+
+	e = VNUM_2bytes(av[0], &u, 0);
+	if (e != NULL)
+		ARGV_ERR("(-sumem) size \"%s\": %s\n", av[0], e);
+	if ((u != (uintmax_t)(size_t)u))
+		ARGV_ERR("(-sumem) size \"%s\": too big\n", av[0]);
+	smu_max = u;
+}
+
+static void
+smu_open(const struct stevedore *st)
+{
+	(void)st;
+	AZ(pthread_mutex_init(&smu_mtx, NULL));
+}
+
+const struct stevedore smu_stevedore = {
+	.magic	=	STEVEDORE_MAGIC,
+	.name	=	"umem",
+	.init	=	smu_init,
+	.open	=	smu_open,
+	.alloc	=	smu_alloc,
+	.free	=	smu_free,
+	.trim	=	smu_trim,
+};
+
+#endif /* HAVE_UMEM_H */
diff --git a/bin/varnishd/storage_file.c b/bin/varnishd/storage_file.c
deleted file mode 100644
index ecfc4ff..0000000
--- a/bin/varnishd/storage_file.c
+++ /dev/null
@@ -1,615 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2010 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Storage method based on mmap'ed file
- */
-
-#include "config.h"
-
-#include <sys/mman.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "vnum.h"
-
-#ifndef MAP_NOCORE
-#define MAP_NOCORE 0 /* XXX Linux */
-#endif
-
-#ifndef MAP_NOSYNC
-#define MAP_NOSYNC 0 /* XXX Linux */
-#endif
-
-#define MINPAGES		128
-
-/*
- * Number of buckets on free-list.
- *
- * Last bucket is "larger than" so choose number so that the second
- * to last bucket matches the 128k CHUNKSIZE in cache_fetch.c when
- * using the a 4K minimal page size
- */
-#define NBUCKET			(128 / 4 + 1)
-
-/*--------------------------------------------------------------------*/
-
-VTAILQ_HEAD(smfhead, smf);
-
-struct smf {
-	unsigned		magic;
-#define SMF_MAGIC		0x0927a8a0
-	struct storage		s;
-	struct smf_sc		*sc;
-
-	int			alloc;
-
-	off_t			size;
-	off_t			offset;
-	unsigned char		*ptr;
-
-	VTAILQ_ENTRY(smf)	order;
-	VTAILQ_ENTRY(smf)	status;
-	struct smfhead		*flist;
-};
-
-struct smf_sc {
-	unsigned		magic;
-#define SMF_SC_MAGIC		0x52962ee7
-	struct lock		mtx;
-	struct VSC_C_smf	*stats;
-
-	const char		*filename;
-	int			fd;
-	unsigned		pagesize;
-	uintmax_t		filesize;
-	struct smfhead		order;
-	struct smfhead		free[NBUCKET];
-	struct smfhead		used;
-};
-
-/*--------------------------------------------------------------------*/
-
-static void
-smf_initfile(struct smf_sc *sc, const char *size)
-{
-	sc->filesize = STV_FileSize(sc->fd, size, &sc->pagesize, "-sfile");
-
-	AZ(ftruncate(sc->fd, (off_t)sc->filesize));
-
-	/* XXX: force block allocation here or in open ? */
-}
-
-static const char default_size[] = "100M";
-static const char default_filename[] = ".";
-
-static void
-smf_init(struct stevedore *parent, int ac, char * const *av)
-{
-	const char *size, *fn, *r;
-	struct smf_sc *sc;
-	unsigned u;
-	uintmax_t page_size;
-
-	AZ(av[ac]);
-
-	fn = default_filename;
-	size = default_size;
-	page_size = getpagesize();
-
-	if (ac > 3)
-		ARGV_ERR("(-sfile) too many arguments\n");
-	if (ac > 0 && *av[0] != '\0')
-		fn = av[0];
-	if (ac > 1 && *av[1] != '\0')
-		size = av[1];
-	if (ac > 2 && *av[2] != '\0') {
-
-		r = VNUM_2bytes(av[2], &page_size, 0);
-		if (r != NULL)
-			ARGV_ERR("(-sfile) granularity \"%s\": %s\n", av[2], r);
-	}
-
-	AN(fn);
-	AN(size);
-
-	ALLOC_OBJ(sc, SMF_SC_MAGIC);
-	XXXAN(sc);
-	VTAILQ_INIT(&sc->order);
-	for (u = 0; u < NBUCKET; u++)
-		VTAILQ_INIT(&sc->free[u]);
-	VTAILQ_INIT(&sc->used);
-	sc->pagesize = page_size;
-
-	parent->priv = sc;
-
-	(void)STV_GetFile(fn, &sc->fd, &sc->filename, "-sfile");
-
-	mgt_child_inherit(sc->fd, "storage_file");
-	smf_initfile(sc, size);
-}
-
-/*--------------------------------------------------------------------
- * Insert/Remove from correct freelist
- */
-
-static void
-insfree(struct smf_sc *sc, struct smf *sp)
-{
-	size_t b;
-	struct smf *sp2;
-	size_t ns;
-
-	assert(sp->alloc == 0);
-	assert(sp->flist == NULL);
-	Lck_AssertHeld(&sc->mtx);
-	b = sp->size / sc->pagesize;
-	if (b >= NBUCKET) {
-		b = NBUCKET - 1;
-		sc->stats->g_smf_large++;
-	} else {
-		sc->stats->g_smf_frag++;
-	}
-	sp->flist = &sc->free[b];
-	ns = b * sc->pagesize;
-	VTAILQ_FOREACH(sp2, sp->flist, status) {
-		assert(sp2->size >= ns);
-		assert(sp2->alloc == 0);
-		assert(sp2->flist == sp->flist);
-		if (sp->offset < sp2->offset)
-			break;
-	}
-	if (sp2 == NULL)
-		VTAILQ_INSERT_TAIL(sp->flist, sp, status);
-	else
-		VTAILQ_INSERT_BEFORE(sp2, sp, status);
-}
-
-static void
-remfree(const struct smf_sc *sc, struct smf *sp)
-{
-	size_t b;
-
-	assert(sp->alloc == 0);
-	assert(sp->flist != NULL);
-	Lck_AssertHeld(&sc->mtx);
-	b = sp->size / sc->pagesize;
-	if (b >= NBUCKET) {
-		b = NBUCKET - 1;
-		sc->stats->g_smf_large--;
-	} else {
-		sc->stats->g_smf_frag--;
-	}
-	assert(sp->flist == &sc->free[b]);
-	VTAILQ_REMOVE(sp->flist, sp, status);
-	sp->flist = NULL;
-}
-
-/*--------------------------------------------------------------------
- * Allocate a range from the first free range that is large enough.
- */
-
-static struct smf *
-alloc_smf(struct smf_sc *sc, size_t bytes)
-{
-	struct smf *sp, *sp2;
-	size_t b;
-
-	assert(!(bytes % sc->pagesize));
-	b = bytes / sc->pagesize;
-	if (b >= NBUCKET)
-		b = NBUCKET - 1;
-	for (sp = NULL; b < NBUCKET - 1; b++) {
-		sp = VTAILQ_FIRST(&sc->free[b]);
-		if (sp != NULL)
-			break;
-	}
-	if (sp == NULL) {
-		VTAILQ_FOREACH(sp, &sc->free[NBUCKET -1], status)
-			if (sp->size >= bytes)
-				break;
-	}
-	if (sp == NULL)
-		return (sp);
-
-	assert(sp->size >= bytes);
-	remfree(sc, sp);
-
-	if (sp->size == bytes) {
-		sp->alloc = 1;
-		VTAILQ_INSERT_TAIL(&sc->used, sp, status);
-		return (sp);
-	}
-
-	/* Split from front */
-	sp2 = malloc(sizeof *sp2);
-	XXXAN(sp2);
-	sc->stats->g_smf++;
-	*sp2 = *sp;
-
-	sp->offset += bytes;
-	sp->ptr += bytes;
-	sp->size -= bytes;
-
-	sp2->size = bytes;
-	sp2->alloc = 1;
-	VTAILQ_INSERT_BEFORE(sp, sp2, order);
-	VTAILQ_INSERT_TAIL(&sc->used, sp2, status);
-	insfree(sc, sp);
-	return (sp2);
-}
-
-/*--------------------------------------------------------------------
- * Free a range.  Attempt merge forward and backward, then sort into
- * free list according to age.
- */
-
-static void
-free_smf(struct smf *sp)
-{
-	struct smf *sp2;
-	struct smf_sc *sc = sp->sc;
-
-	CHECK_OBJ_NOTNULL(sp, SMF_MAGIC);
-	assert(sp->alloc != 0);
-	assert(sp->size > 0);
-	assert(!(sp->size % sc->pagesize));
-	VTAILQ_REMOVE(&sc->used, sp, status);
-	sp->alloc = 0;
-
-	sp2 = VTAILQ_NEXT(sp, order);
-	if (sp2 != NULL &&
-	    sp2->alloc == 0 &&
-	    (sp2->ptr == sp->ptr + sp->size) &&
-	    (sp2->offset == sp->offset + sp->size)) {
-		sp->size += sp2->size;
-		VTAILQ_REMOVE(&sc->order, sp2, order);
-		remfree(sc, sp2);
-		free(sp2);
-		sc->stats->g_smf--;
-	}
-
-	sp2 = VTAILQ_PREV(sp, smfhead, order);
-	if (sp2 != NULL &&
-	    sp2->alloc == 0 &&
-	    (sp->ptr == sp2->ptr + sp2->size) &&
-	    (sp->offset == sp2->offset + sp2->size)) {
-		remfree(sc, sp2);
-		sp2->size += sp->size;
-		VTAILQ_REMOVE(&sc->order, sp, order);
-		free(sp);
-		sc->stats->g_smf--;
-		sp = sp2;
-	}
-
-	insfree(sc, sp);
-}
-
-/*--------------------------------------------------------------------
- * Trim the tail of a range.
- */
-
-static void
-trim_smf(struct smf *sp, size_t bytes)
-{
-	struct smf *sp2;
-	struct smf_sc *sc = sp->sc;
-
-	assert(sp->alloc != 0);
-	assert(bytes > 0);
-	assert(bytes < sp->size);
-	assert(!(bytes % sc->pagesize));
-	assert(!(sp->size % sc->pagesize));
-	CHECK_OBJ_NOTNULL(sp, SMF_MAGIC);
-	sp2 = malloc(sizeof *sp2);
-	XXXAN(sp2);
-	sc->stats->g_smf++;
-	*sp2 = *sp;
-
-	sp2->size -= bytes;
-	sp->size = bytes;
-	sp2->ptr += bytes;
-	sp2->offset += bytes;
-	VTAILQ_INSERT_AFTER(&sc->order, sp, sp2, order);
-	VTAILQ_INSERT_TAIL(&sc->used, sp2, status);
-	free_smf(sp2);
-}
-
-/*--------------------------------------------------------------------
- * Insert a newly created range as busy, then free it to do any collapses
- */
-
-static void
-new_smf(struct smf_sc *sc, unsigned char *ptr, off_t off, size_t len)
-{
-	struct smf *sp, *sp2;
-
-	assert(!(len % sc->pagesize));
-	sp = calloc(sizeof *sp, 1);
-	XXXAN(sp);
-	sp->magic = SMF_MAGIC;
-	sp->s.magic = STORAGE_MAGIC;
-	sc->stats->g_smf++;
-
-	sp->sc = sc;
-	sp->size = len;
-	sp->ptr = ptr;
-	sp->offset = off;
-	sp->alloc = 1;
-
-	VTAILQ_FOREACH(sp2, &sc->order, order) {
-		if (sp->ptr < sp2->ptr) {
-			VTAILQ_INSERT_BEFORE(sp2, sp, order);
-			break;
-		}
-	}
-	if (sp2 == NULL)
-		VTAILQ_INSERT_TAIL(&sc->order, sp, order);
-
-	VTAILQ_INSERT_HEAD(&sc->used, sp, status);
-
-	free_smf(sp);
-}
-
-/*--------------------------------------------------------------------*/
-
-/*
- * XXX: This may be too aggressive and soak up too much address room.
- * XXX: On the other hand, the user, directly or implicitly asked us to
- * XXX: use this much storage, so we should make a decent effort.
- * XXX: worst case (I think), malloc will fail.
- */
-
-static void
-smf_open_chunk(struct smf_sc *sc, off_t sz, off_t off, off_t *fail, off_t *sum)
-{
-	void *p;
-	off_t h;
-
-	assert(sz != 0);
-	assert(!(sz % sc->pagesize));
-
-	if (*fail < (uintmax_t)sc->pagesize * MINPAGES)
-		return;
-
-	if (sz > 0 && sz < *fail && sz < SSIZE_MAX) {
-		p = mmap(NULL, sz, PROT_READ|PROT_WRITE,
-		    MAP_NOCORE | MAP_NOSYNC | MAP_SHARED, sc->fd, off);
-		if (p != MAP_FAILED) {
-			(void) madvise(p, sz, MADV_RANDOM);
-			(*sum) += sz;
-			new_smf(sc, p, off, sz);
-			return;
-		}
-	}
-
-	if (sz < *fail)
-		*fail = sz;
-
-	h = sz / 2;
-	if (h > SSIZE_MAX)
-		h = SSIZE_MAX;
-	h -= (h % sc->pagesize);
-
-	smf_open_chunk(sc, h, off, fail, sum);
-	smf_open_chunk(sc, sz - h, off + h, fail, sum);
-}
-
-static void
-smf_open(const struct stevedore *st)
-{
-	struct smf_sc *sc;
-	off_t fail = 1 << 30;	/* XXX: where is OFF_T_MAX ? */
-	off_t sum = 0;
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMF_SC_MAGIC);
-	sc->stats = VSM_Alloc(sizeof *sc->stats,
-	    VSC_CLASS, VSC_TYPE_SMF, st->ident);
-	Lck_New(&sc->mtx, lck_smf);
-	Lck_Lock(&sc->mtx);
-	smf_open_chunk(sc, sc->filesize, 0, &fail, &sum);
-	Lck_Unlock(&sc->mtx);
-	printf("SMF.%s mmap'ed %ju bytes of %ju\n",
-	    st->ident, (uintmax_t)sum, sc->filesize);
-
-	/* XXX */
-	if (sum < MINPAGES * (off_t)getpagesize())
-		exit (2);
-
-	sc->stats->g_space += sc->filesize;
-}
-
-/*--------------------------------------------------------------------*/
-
-static struct storage *
-smf_alloc(struct stevedore *st, size_t size)
-{
-	struct smf *smf;
-	struct smf_sc *sc;
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMF_SC_MAGIC);
-	assert(size > 0);
-	size += (sc->pagesize - 1);
-	size &= ~(sc->pagesize - 1);
-	Lck_Lock(&sc->mtx);
-	sc->stats->c_req++;
-	smf = alloc_smf(sc, size);
-	if (smf == NULL) {
-		sc->stats->c_fail++;
-		Lck_Unlock(&sc->mtx);
-		return (NULL);
-	}
-	CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
-	sc->stats->g_alloc++;
-	sc->stats->c_bytes += smf->size;
-	sc->stats->g_bytes += smf->size;
-	sc->stats->g_space -= smf->size;
-	Lck_Unlock(&sc->mtx);
-	CHECK_OBJ_NOTNULL(&smf->s, STORAGE_MAGIC);	/*lint !e774 */
-	XXXAN(smf);
-	assert(smf->size == size);
-	smf->s.space = size;
-	smf->s.priv = smf;
-	smf->s.ptr = smf->ptr;
-	smf->s.len = 0;
-	smf->s.stevedore = st;
-#ifdef SENDFILE_WORKS
-	smf->s.fd = smf->sc->fd;
-	smf->s.where = smf->offset;
-#endif
-	return (&smf->s);
-}
-
-/*--------------------------------------------------------------------*/
-
-static void
-smf_trim(struct storage *s, size_t size)
-{
-	struct smf *smf;
-	struct smf_sc *sc;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	assert(size > 0);
-	assert(size <= s->space);
-	xxxassert(size > 0);	/* XXX: seen */
-	CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
-	assert(size <= smf->size);
-	sc = smf->sc;
-	size += (sc->pagesize - 1);
-	size &= ~(sc->pagesize - 1);
-	if (smf->size > size) {
-		Lck_Lock(&sc->mtx);
-		sc->stats->c_freed += (smf->size - size);
-		sc->stats->g_bytes -= (smf->size - size);
-		sc->stats->g_space += (smf->size - size);
-		trim_smf(smf, size);
-		assert(smf->size == size);
-		Lck_Unlock(&sc->mtx);
-		s->space = size;
-	}
-}
-
-/*--------------------------------------------------------------------*/
-
-static void __match_proto__(storage_free_f)
-smf_free(struct storage *s)
-{
-	struct smf *smf;
-	struct smf_sc *sc;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
-	sc = smf->sc;
-	Lck_Lock(&sc->mtx);
-	sc->stats->g_alloc--;
-	sc->stats->c_freed += smf->size;
-	sc->stats->g_bytes -= smf->size;
-	sc->stats->g_space += smf->size;
-	free_smf(smf);
-	Lck_Unlock(&sc->mtx);
-}
-
-/*--------------------------------------------------------------------*/
-
-const struct stevedore smf_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"file",
-	.init	=	smf_init,
-	.open	=	smf_open,
-	.alloc	=	smf_alloc,
-	.trim	=	smf_trim,
-	.free	=	smf_free,
-};
-
-#ifdef INCLUDE_TEST_DRIVER
-
-void vca_flush(struct sess *sp) {}
-
-#define N	100
-#define M	(128*1024)
-
-struct storage *s[N];
-
-static void
-dumpit(void)
-{
-	struct smf_sc *sc = smf_stevedore.priv;
-	struct smf *s;
-
-	return (0);
-	printf("----------------\n");
-	printf("Order:\n");
-	VTAILQ_FOREACH(s, &sc->order, order) {
-		printf("%10p %12ju %12ju %12ju\n",
-		    s, s->offset, s->size, s->offset + s->size);
-	}
-	printf("Used:\n");
-	VTAILQ_FOREACH(s, &sc->used, status) {
-		printf("%10p %12ju %12ju %12ju\n",
-		    s, s->offset, s->size, s->offset + s->size);
-	}
-	printf("Free:\n");
-	VTAILQ_FOREACH(s, &sc->free, status) {
-		printf("%10p %12ju %12ju %12ju\n",
-		    s, s->offset, s->size, s->offset + s->size);
-	}
-	printf("================\n");
-}
-
-int
-main(int argc, char **argv)
-{
-	int i, j;
-
-	setbuf(stdout, NULL);
-	smf_init(&smf_stevedore, "");
-	smf_open(&smf_stevedore);
-	while (1) {
-		dumpit();
-		i = random() % N;
-		do
-			j = random() % M;
-		while (j == 0);
-		if (s[i] == NULL) {
-			s[i] = smf_alloc(&smf_stevedore, j);
-			printf("A %10p %12d\n", s[i], j);
-		} else if (j < s[i]->space) {
-			smf_trim(s[i], j);
-			printf("T %10p %12d\n", s[i], j);
-		} else {
-			smf_free(s[i]);
-			printf("D %10p\n", s[i]);
-			s[i] = NULL;
-		}
-	}
-}
-
-#endif /* INCLUDE_TEST_DRIVER */
diff --git a/bin/varnishd/storage_malloc.c b/bin/varnishd/storage_malloc.c
deleted file mode 100644
index 967137c..0000000
--- a/bin/varnishd/storage_malloc.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Storage method based on malloc(3)
- */
-
-#include "config.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "vnum.h"
-
-struct sma_sc {
-	unsigned		magic;
-#define SMA_SC_MAGIC		0x1ac8a345
-	struct lock		sma_mtx;
-	size_t			sma_max;
-	size_t			sma_alloc;
-	struct VSC_C_sma	*stats;
-};
-
-struct sma {
-	unsigned		magic;
-#define SMA_MAGIC		0x69ae9bb9
-	struct storage		s;
-	size_t			sz;
-	struct sma_sc		*sc;
-};
-
-static struct storage *
-sma_alloc(struct stevedore *st, size_t size)
-{
-	struct sma_sc *sma_sc;
-	struct sma *sma = NULL;
-	void *p;
-
-	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
-	Lck_Lock(&sma_sc->sma_mtx);
-	sma_sc->stats->c_req++;
-	if (sma_sc->sma_alloc + size > sma_sc->sma_max) {
-		sma_sc->stats->c_fail += size;
-		size = 0;
-	} else {
-		sma_sc->sma_alloc += size;
-		sma_sc->stats->c_bytes += size;
-		sma_sc->stats->g_alloc++;
-		sma_sc->stats->g_bytes += size;
-		if (sma_sc->sma_max != SIZE_MAX)
-			sma_sc->stats->g_space -= size;
-	}
-	Lck_Unlock(&sma_sc->sma_mtx);
-
-	if (size == 0)
-		return (NULL);
-
-	/*
-	 * Do not collaps the sma allocation with sma->s.ptr: it is not
-	 * a good idea.  Not only would it make ->trim impossible,
-	 * performance-wise it would be a catastropy with chunksized
-	 * allocations growing another full page, just to accomodate the sma.
-	 */
-
-	p = malloc(size);
-	if (p != NULL) {
-		ALLOC_OBJ(sma, SMA_MAGIC);
-		if (sma != NULL)
-			sma->s.ptr = p;
-		else
-			free(p);
-	}
-	if (sma == NULL) {
-		Lck_Lock(&sma_sc->sma_mtx);
-		/*
-		 * XXX: Not nice to have counters go backwards, but we do
-		 * XXX: Not want to pick up the lock twice just for stats.
-		 */
-		sma_sc->stats->c_fail++;
-		sma_sc->stats->c_bytes -= size;
-		sma_sc->stats->g_alloc--;
-		sma_sc->stats->g_bytes -= size;
-		if (sma_sc->sma_max != SIZE_MAX)
-			sma_sc->stats->g_space += size;
-		Lck_Unlock(&sma_sc->sma_mtx);
-		return (NULL);
-	}
-	sma->sc = sma_sc;
-	sma->sz = size;
-	sma->s.priv = sma;
-	sma->s.len = 0;
-	sma->s.space = size;
-#ifdef SENDFILE_WORKS
-	sma->s.fd = -1;
-#endif
-	sma->s.stevedore = st;
-	sma->s.magic = STORAGE_MAGIC;
-	return (&sma->s);
-}
-
-static void __match_proto__(storage_free_f)
-sma_free(struct storage *s)
-{
-	struct sma_sc *sma_sc;
-	struct sma *sma;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	CAST_OBJ_NOTNULL(sma, s->priv, SMA_MAGIC);
-	sma_sc = sma->sc;
-	assert(sma->sz == sma->s.space);
-	Lck_Lock(&sma_sc->sma_mtx);
-	sma_sc->sma_alloc -= sma->sz;
-	sma_sc->stats->g_alloc--;
-	sma_sc->stats->g_bytes -= sma->sz;
-	sma_sc->stats->c_freed += sma->sz;
-	if (sma_sc->sma_max != SIZE_MAX)
-		sma_sc->stats->g_space += sma->sz;
-	Lck_Unlock(&sma_sc->sma_mtx);
-	free(sma->s.ptr);
-	free(sma);
-}
-
-static void
-sma_trim(struct storage *s, size_t size)
-{
-	struct sma_sc *sma_sc;
-	struct sma *sma;
-	void *p;
-	size_t delta;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	CAST_OBJ_NOTNULL(sma, s->priv, SMA_MAGIC);
-	sma_sc = sma->sc;
-
-	assert(sma->sz == sma->s.space);
-	assert(size < sma->sz);
-	delta = sma->sz - size;
-	if (delta < 256)
-		return;
-	if ((p = realloc(sma->s.ptr, size)) != NULL) {
-		Lck_Lock(&sma_sc->sma_mtx);
-		sma_sc->sma_alloc -= delta;
-		sma_sc->stats->g_bytes -= delta;
-		sma_sc->stats->c_freed += delta;
-		if (sma_sc->sma_max != SIZE_MAX)
-			sma_sc->stats->g_space += delta;
-		sma->sz = size;
-		Lck_Unlock(&sma_sc->sma_mtx);
-		sma->s.ptr = p;
-		s->space = size;
-	}
-}
-
-static double
-sma_used_space(const struct stevedore *st)
-{
-	struct sma_sc *sma_sc;
-
-	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
-	return (sma_sc->sma_alloc);
-}
-
-static double
-sma_free_space(const struct stevedore *st)
-{
-	struct sma_sc *sma_sc;
-
-	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
-	return (sma_sc->sma_max - sma_sc->sma_alloc);
-}
-
-static void
-sma_init(struct stevedore *parent, int ac, char * const *av)
-{
-	const char *e;
-	uintmax_t u;
-	struct sma_sc *sc;
-
-	ASSERT_MGT();
-	ALLOC_OBJ(sc, SMA_SC_MAGIC);
-	AN(sc);
-	sc->sma_max = SIZE_MAX;
-	assert(sc->sma_max == SIZE_MAX);
-	parent->priv = sc;
-
-	AZ(av[ac]);
-	if (ac > 1)
-		ARGV_ERR("(-smalloc) too many arguments\n");
-
-	if (ac == 0 || *av[0] == '\0')
-		 return;
-
-	e = VNUM_2bytes(av[0], &u, 0);
-	if (e != NULL)
-		ARGV_ERR("(-smalloc) size \"%s\": %s\n", av[0], e);
-	if ((u != (uintmax_t)(size_t)u))
-		ARGV_ERR("(-smalloc) size \"%s\": too big\n", av[0]);
-	if (u < 1024*1024)
-		ARGV_ERR("(-smalloc) size \"%s\": too small, "
-			 "did you forget to specify M or G?\n", av[0]);
-
-	sc->sma_max = u;
-}
-
-static void
-sma_open(const struct stevedore *st)
-{
-	struct sma_sc *sma_sc;
-
-	CAST_OBJ_NOTNULL(sma_sc, st->priv, SMA_SC_MAGIC);
-	Lck_New(&sma_sc->sma_mtx, lck_sma);
-	sma_sc->stats = VSM_Alloc(sizeof *sma_sc->stats,
-	    VSC_CLASS, VSC_TYPE_SMA, st->ident);
-	memset(sma_sc->stats, 0, sizeof *sma_sc->stats);
-	if (sma_sc->sma_max != SIZE_MAX)
-		sma_sc->stats->g_space = sma_sc->sma_max;
-}
-
-const struct stevedore sma_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"malloc",
-	.init	=	sma_init,
-	.open	=	sma_open,
-	.alloc	=	sma_alloc,
-	.free	=	sma_free,
-	.trim	=	sma_trim,
-	.var_free_space =	sma_free_space,
-	.var_used_space =	sma_used_space,
-};
diff --git a/bin/varnishd/storage_persistent.c b/bin/varnishd/storage_persistent.c
deleted file mode 100644
index abe25f6..0000000
--- a/bin/varnishd/storage_persistent.c
+++ /dev/null
@@ -1,678 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Persistent storage method
- *
- * XXX: Before we start the client or maybe after it stops, we should give the
- * XXX: stevedores a chance to examine their storage for consistency.
- *
- * XXX: Do we ever free the LRU-lists ?
- */
-
-#include "config.h"
-
-#include <sys/param.h>
-#include <sys/mman.h>
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "hash_slinger.h"
-#include "vcli.h"
-#include "vcli_priv.h"
-#include "vend.h"
-#include "vsha256.h"
-
-#include "persistent.h"
-#include "storage_persistent.h"
-
-/*--------------------------------------------------------------------*/
-
-/*
- * silos is unlocked, it only changes during startup when we are
- * single-threaded
- */
-static VTAILQ_HEAD(,smp_sc)	silos = VTAILQ_HEAD_INITIALIZER(silos);
-
-/*--------------------------------------------------------------------
- * Add bans to silos
- */
-
-static void
-smp_appendban(struct smp_sc *sc, struct smp_signctx *ctx,
-    uint32_t len, const uint8_t *ban)
-{
-	uint8_t *ptr, *ptr2;
-
-	(void)sc;
-	ptr = ptr2 = SIGN_END(ctx);
-
-	memcpy(ptr, "BAN", 4);
-	ptr += 4;
-
-	vbe32enc(ptr, len);
-	ptr += 4;
-
-	memcpy(ptr, ban, len);
-	ptr += len;
-
-	smp_append_sign(ctx, ptr2, ptr - ptr2);
-}
-
-/* Trust that cache_ban.c takes care of locking */
-
-void
-SMP_NewBan(const uint8_t *ban, unsigned ln)
-{
-	struct smp_sc *sc;
-
-	VTAILQ_FOREACH(sc, &silos, list) {
-		smp_appendban(sc, &sc->ban1, ln, ban);
-		smp_appendban(sc, &sc->ban2, ln, ban);
-	}
-}
-
-/*--------------------------------------------------------------------
- * Attempt to open and read in a ban list
- */
-
-static int
-smp_open_bans(struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	uint8_t *ptr, *pe;
-	uint32_t length;
-	int i, retval = 0;
-
-	ASSERT_CLI();
-	(void)sc;
-	i = smp_chk_sign(ctx);
-	if (i)
-		return (i);
-	ptr = SIGN_DATA(ctx);
-	pe = ptr + ctx->ss->length;
-
-	while (ptr < pe) {
-		if (memcmp(ptr, "BAN", 4)) {
-			retval = 1001;
-			break;
-		}
-		ptr += 4;
-
-		length = vbe32dec(ptr);
-		ptr += 4;
-
-		if (ptr + length > pe) {
-			retval = 1003;
-			break;
-		}
-
-		BAN_Reload(ptr, length);
-
-		ptr += length;
-	}
-	assert(ptr <= pe);
-	return (retval);
-}
-
-/*--------------------------------------------------------------------
- * Attempt to open and read in a segment list
- */
-
-static int
-smp_open_segs(struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	uint64_t length, l;
-	struct smp_segptr *ss, *se;
-	struct smp_seg *sg, *sg1, *sg2;
-	int i, n = 0;
-
-	ASSERT_CLI();
-	i = smp_chk_sign(ctx);
-	if (i)
-		return (i);
-
-	ss = SIGN_DATA(ctx);
-	length = ctx->ss->length;
-
-	if (length == 0) {
-		/* No segments */
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		return (0);
-	}
-	se = ss + length / sizeof *ss;
-	se--;
-	assert(ss <= se);
-
-	/*
-	 * Locate the free reserve, there are only two basic cases,
-	 * but once we start dropping segments, things gets more complicated.
-	 */
-
-	sc->free_offset = se->offset + se->length;
-	l = sc->mediasize - sc->free_offset;
-	if (se->offset > ss->offset && l >= sc->free_reserve) {
-		/*
-		 * [__xxxxyyyyzzzz___]
-		 * Plenty of space at tail, do nothing.
-		 */
-	} else if (ss->offset > se->offset) {
-		/*
-		 * [zzzz____xxxxyyyy_]
-		 * (make) space between ends
-		 * We might nuke the entire tail end without getting
-		 * enough space, in which case we fall through to the
-		 * last check.
-		 */
-		while (ss < se && ss->offset > se->offset) {
-			l = ss->offset - (se->offset + se->length);
-			if (l > sc->free_reserve)
-				break;
-			ss++;
-			n++;
-		}
-	}
-
-	if (l < sc->free_reserve) {
-		/*
-		 * [__xxxxyyyyzzzz___]
-		 * (make) space at front
-		 */
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		while (ss < se) {
-			l = ss->offset - sc->free_offset;
-			if (l > sc->free_reserve)
-				break;
-			ss++;
-			n++;
-		}
-	}
-
-	assert (l >= sc->free_reserve);
-
-
-	sg1 = NULL;
-	sg2 = NULL;
-	for(; ss <= se; ss++) {
-		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
-		AN(sg);
-		sg->lru = LRU_Alloc();
-		CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-		sg->p = *ss;
-
-		sg->flags |= SMP_SEG_MUSTLOAD;
-
-		/*
-		 * HACK: prevent save_segs from nuking segment until we have
-		 * HACK: loaded it.
-		 */
-		sg->nobj = 1;
-		if (sg1 != NULL) {
-			assert(sg1->p.offset != sg->p.offset);
-			if (sg1->p.offset < sg->p.offset)
-				assert(smp_segend(sg1) <= sg->p.offset);
-			else
-				assert(smp_segend(sg) <= sg1->p.offset);
-		}
-		if (sg2 != NULL) {
-			assert(sg2->p.offset != sg->p.offset);
-			if (sg2->p.offset < sg->p.offset)
-				assert(smp_segend(sg2) <= sg->p.offset);
-			else
-				assert(smp_segend(sg) <= sg2->p.offset);
-		}
-
-		/* XXX: check that they are inside silo */
-		/* XXX: check that they don't overlap */
-		/* XXX: check that they are serial */
-		sg->sc = sc;
-		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
-		sg2 = sg;
-		if (sg1 == NULL)
-			sg1 = sg;
-	}
-	printf("Dropped %d segments to make free_reserve\n", n);
-	return (0);
-}
-
-/*--------------------------------------------------------------------
- * Silo worker thread
- */
-
-static void *
-smp_thread(struct sess *sp, void *priv)
-{
-	struct smp_sc	*sc;
-	struct smp_seg *sg;
-
-	(void)sp;
-	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
-
-	/* First, load all the objects from all segments */
-	VTAILQ_FOREACH(sg, &sc->segments, list)
-		if (sg->flags & SMP_SEG_MUSTLOAD)
-			smp_load_seg(sp, sc, sg);
-
-	sc->flags |= SMP_SC_LOADED;
-	BAN_TailDeref(&sc->tailban);
-	AZ(sc->tailban);
-	printf("Silo completely loaded\n");
-	while (1) {
-		(void)sleep (1);
-		sg = VTAILQ_FIRST(&sc->segments);
-		if (sg != NULL && sg -> sc->cur_seg &&
-		    sg->nobj == 0) {
-			Lck_Lock(&sc->mtx);
-			smp_save_segs(sc);
-			Lck_Unlock(&sc->mtx);
-		}
-	}
-	NEEDLESS_RETURN(NULL);
-}
-
-/*--------------------------------------------------------------------
- * Open a silo in the worker process
- */
-
-static void
-smp_open(const struct stevedore *st)
-{
-	struct smp_sc	*sc;
-
-	ASSERT_CLI();
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-
-	Lck_New(&sc->mtx, lck_smp);
-	Lck_Lock(&sc->mtx);
-
-	sc->stevedore = st;
-
-	/* We trust the parent to give us a valid silo, for good measure: */
-	AZ(smp_valid_silo(sc));
-
-	AZ(mprotect(sc->base, 4096, PROT_READ));
-
-	sc->ident = SIGN_DATA(&sc->idn);
-
-	/* We attempt ban1 first, and if that fails, try ban2 */
-	if (smp_open_bans(sc, &sc->ban1))
-		AZ(smp_open_bans(sc, &sc->ban2));
-
-	/* We attempt seg1 first, and if that fails, try seg2 */
-	if (smp_open_segs(sc, &sc->seg1))
-		AZ(smp_open_segs(sc, &sc->seg2));
-
-	/*
-	 * Grap a reference to the tail of the ban list, until the thread
-	 * has loaded all objects, so we can be sure that all of our
-	 * proto-bans survive until then.
-	 */
-	sc->tailban = BAN_TailRef();
-	AN(sc->tailban);
-
-	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */
-
-	/* XXX: abandon early segments to make sure we have free space ? */
-
-	/* Open a new segment, so we are ready to write */
-	smp_new_seg(sc);
-
-	/* Start the worker silo worker thread, it will load the objects */
-	WRK_BgThread(&sc->thread, "persistence", smp_thread, sc);
-
-	VTAILQ_INSERT_TAIL(&silos, sc, list);
-	Lck_Unlock(&sc->mtx);
-}
-
-/*--------------------------------------------------------------------
- * Close a silo
- */
-
-static void
-smp_close(const struct stevedore *st)
-{
-	struct smp_sc	*sc;
-
-	ASSERT_CLI();
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-	Lck_Lock(&sc->mtx);
-	smp_close_seg(sc, sc->cur_seg);
-	Lck_Unlock(&sc->mtx);
-
-	/* XXX: reap thread */
-}
-
-/*--------------------------------------------------------------------
- * Allocate a bite.
- *
- * Allocate [min_size...max_size] space from the bottom of the segment,
- * as is convenient.
- *
- * If 'so' + 'idx' is given, also allocate a smp_object from the top
- * of the segment.
- *
- * Return the segment in 'ssg' if given.
- */
-
-static struct storage *
-smp_allocx(struct stevedore *st, size_t min_size, size_t max_size,
-    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
-{
-	struct smp_sc *sc;
-	struct storage *ss;
-	struct smp_seg *sg;
-	unsigned tries;
-	uint64_t left, extra;
-
-	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
-	assert(min_size <= max_size);
-
-	max_size = IRNUP(sc, max_size);
-	min_size = IRNUP(sc, min_size);
-
-	extra = IRNUP(sc, sizeof(*ss));
-	if (so != NULL) {
-		extra += sizeof(**so);
-		AN(idx);
-	}
-
-	Lck_Lock(&sc->mtx);
-	sg = NULL;
-	ss = NULL;
-	for (tries = 0; tries < 3; tries++) {
-		left = smp_spaceleft(sc, sc->cur_seg);
-		if (left >= extra + min_size)
-			break;
-		smp_close_seg(sc, sc->cur_seg);
-		smp_new_seg(sc);
-	}
-	if (left >= extra + min_size)  {
-		if (left < extra + max_size)
-			max_size = IRNDN(sc, left - extra);
-
-		sg = sc->cur_seg;
-		ss = (void*)(sc->base + sc->next_bot);
-		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
-		sg->nalloc++;
-		if (so != NULL) {
-			sc->next_top -= sizeof(**so);
-			*so = (void*)(sc->base + sc->next_top);
-			/* Render this smp_object mostly harmless */
-			(*so)->ttl = 0.;
-			(*so)->ban = 0.;
-			(*so)->ptr = 0;;
-			sg->objs = *so;
-			*idx = ++sg->p.lobjlist;
-		}
-		(void)smp_spaceleft(sc, sg);	/* for the assert */
-	}
-	Lck_Unlock(&sc->mtx);
-
-	if (ss == NULL)
-		return (ss);
-	AN(sg);
-	assert(max_size >= min_size);
-
-	/* Fill the storage structure */
-	memset(ss, 0, sizeof *ss);
-	ss->magic = STORAGE_MAGIC;
-	ss->ptr = PRNUP(sc, ss + 1);
-	ss->space = max_size;
-	ss->priv = sc;
-	ss->stevedore = st;
-#ifdef SENDFILE_WORKS
-	ss->fd = sc->fd;
-#endif
-	if (ssg != NULL)
-		*ssg = sg;
-	return (ss);
-}
-
-/*--------------------------------------------------------------------
- * Allocate an object
- */
-
-static struct object *
-smp_allocobj(struct stevedore *stv, struct sess *sp, unsigned ltot,
-    const struct stv_objsecrets *soc)
-{
-	struct object *o;
-	struct storage *st;
-	struct smp_sc	*sc;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	struct objcore *oc;
-	unsigned objidx;
-
-	if (sp->objcore == NULL)
-		return (NULL);		/* from cnt_error */
-	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
-	AN(sp->objcore);
-	AN(sp->wrk->exp.ttl > 0.);
-
-	ltot = IRNUP(sc, ltot);
-
-	st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
-	if (st == NULL)
-		return (NULL);
-
-	assert(st->space >= ltot);
-	ltot = st->len = st->space;
-
-	o = STV_MkObject(sp, st->ptr, ltot, soc);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-	o->objstore = st;
-
-	oc = o->objcore;
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	oc->flags |= OC_F_LRUDONTMOVE;
-
-	Lck_Lock(&sc->mtx);
-	sg->nfixed++;
-	sg->nobj++;
-
-	/* We have to do this somewhere, might as well be here... */
-	assert(sizeof so->hash == DIGEST_LEN);
-	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
-	so->ttl = EXP_Grace(NULL, o);
-	so->ptr = (uint8_t*)o - sc->base;
-	so->ban = BAN_Time(oc->ban);
-
-	smp_init_oc(oc, sg, objidx);
-
-	Lck_Unlock(&sc->mtx);
-	return (o);
-}
-
-/*--------------------------------------------------------------------
- * Allocate a bite
- */
-
-static struct storage *
-smp_alloc(struct stevedore *st, size_t size)
-{
-
-	return (smp_allocx(st,
-	    size > 4096 ? 4096 : size, size, NULL, NULL, NULL));
-}
-
-/*--------------------------------------------------------------------
- * Trim a bite
- * XXX: We could trim the last allocation.
- */
-
-static void
-smp_trim(struct storage *ss, size_t size)
-{
-
-	(void)ss;
-	(void)size;
-}
-
-/*--------------------------------------------------------------------
- * We don't track frees of storage, we track the objects which own the
- * storage and when there are no more objects in in the first segment,
- * it can be reclaimed.
- * XXX: We could free the last allocation, but does that happen ?
- */
-
-static void __match_proto__(storage_free_f)
-smp_free(struct storage *st)
-{
-
-	/* XXX */
-	(void)st;
-}
-
-
-/*--------------------------------------------------------------------*/
-
-const struct stevedore smp_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"persistent",
-	.init	=	smp_mgt_init,
-	.open	=	smp_open,
-	.close	=	smp_close,
-	.alloc	=	smp_alloc,
-	.allocobj =	smp_allocobj,
-	.free	=	smp_free,
-	.trim	=	smp_trim,
-};
-
-/*--------------------------------------------------------------------
- * Persistence is a bear to test unadultered, so we cheat by adding
- * a cli command we can use to make it do tricks for us.
- */
-
-static void
-debug_report_silo(struct cli *cli, const struct smp_sc *sc, int objs)
-{
-	struct smp_seg *sg;
-	struct objcore *oc;
-
-	VCLI_Out(cli, "Silo: %s (%s)\n",
-	    sc->stevedore->ident, sc->filename);
-	VTAILQ_FOREACH(sg, &sc->segments, list) {
-		VCLI_Out(cli, "  Seg: [0x%jx ... +0x%jx]\n",
-		   (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
-		if (sg == sc->cur_seg)
-			VCLI_Out(cli,
-			   "    Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
-			   (uintmax_t)(sc->next_bot),
-			   (uintmax_t)(sc->next_top),
-			   (uintmax_t)(sc->next_top - sc->next_bot));
-		VCLI_Out(cli, "    %u nobj, %u alloc, %u lobjlist, %u fixed\n",
-		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
-		if (objs) {
-			VTAILQ_FOREACH(oc, &sg->lru->lru_head, lru_list)
-				VCLI_Out(cli, "      OC %p\n", oc);
-		}
-	}
-}
-
-static void
-debug_persistent(struct cli *cli, const char * const * av, void *priv)
-{
-	struct smp_sc *sc;
-
-	(void)priv;
-
-	if (av[2] == NULL) {
-		VTAILQ_FOREACH(sc, &silos, list)
-			debug_report_silo(cli, sc, 0);
-		return;
-	}
-	VTAILQ_FOREACH(sc, &silos, list)
-		if (!strcmp(av[2], sc->stevedore->ident))
-			break;
-	if (sc == NULL) {
-		VCLI_Out(cli, "Silo <%s> not found\n", av[2]);
-		VCLI_SetResult(cli, CLIS_PARAM);
-		return;
-	}
-	if (av[3] == NULL) {
-		debug_report_silo(cli, sc, 0);
-		return;
-	}
-	Lck_Lock(&sc->mtx);
-	if (!strcmp(av[3], "sync")) {
-		smp_close_seg(sc, sc->cur_seg);
-		smp_new_seg(sc);
-	} else if (!strcmp(av[3], "dump")) {
-		debug_report_silo(cli, sc, 1);
-	} else {
-		VCLI_Out(cli, "Unknown operation\n");
-		VCLI_SetResult(cli, CLIS_PARAM);
-	}
-	Lck_Unlock(&sc->mtx);
-}
-
-static struct cli_proto debug_cmds[] = {
-        { "debug.persistent", "debug.persistent",
-                "Persistent debugging magic:\n"
-		"\tdebug.persistent [stevedore [cmd]]\n"
-		"With no cmd arg, a summary of the silo is returned.\n"
-		"Possible commands:\n"
-		"\tsync\tClose current segment, open a new one\n"
-		"\tdump\tinclude objcores in silo summary\n"
-		"",
-		0, 2, "d", debug_persistent },
-        { NULL }
-};
-
-/*--------------------------------------------------------------------*/
-
-void
-SMP_Init(void)
-{
-	CLI_AddFuncs(debug_cmds);
-}
-
-/*--------------------------------------------------------------------
- * Pause until all silos have loaded.
- */
-
-void
-SMP_Ready(void)
-{
-	struct smp_sc *sc;
-
-	ASSERT_CLI();
-	do {
-		VTAILQ_FOREACH(sc, &silos, list)
-			if (!(sc->flags & SMP_SC_LOADED))
-				break;
-		if (sc != NULL)
-			(void)sleep(1);
-	} while (sc != NULL);
-}
diff --git a/bin/varnishd/storage_persistent.h b/bin/varnishd/storage_persistent.h
deleted file mode 100644
index 84f3d21..0000000
--- a/bin/varnishd/storage_persistent.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Persistent storage method
- *
- * XXX: Before we start the client or maybe after it stops, we should give the
- * XXX: stevedores a chance to examine their storage for consistency.
- *
- * XXX: Do we ever free the LRU-lists ?
- */
-
-#define ASSERT_SILO_THREAD(sc) \
-    do {assert(pthread_self() == (sc)->thread);} while (0)
-
-#define OC_F_NEEDFIXUP OC_F_PRIV
-
-/*
- * Context for a signature.
- *
- * A signature is a sequence of bytes in the silo, signed by a SHA256 hash
- * which follows the bytes.
- *
- * The context structure allows us to append to a signature without
- * recalculating the entire SHA256 hash.
- */
-
-struct smp_signctx {
-	struct smp_sign		*ss;
-	struct SHA256Context	ctx;
-	uint32_t		unique;
-	const char		*id;
-};
-
-struct smp_sc;
-
-/* XXX: name confusion with on-media version ? */
-struct smp_seg {
-	unsigned		magic;
-#define SMP_SEG_MAGIC		0x45c61895
-
-	struct smp_sc		*sc;
-	struct lru		*lru;
-
-	VTAILQ_ENTRY(smp_seg)	list;		/* on smp_sc.smp_segments */
-
-	struct smp_segptr	p;
-
-	unsigned		flags;
-#define SMP_SEG_MUSTLOAD	(1 << 0)
-#define SMP_SEG_LOADED		(1 << 1)
-
-	uint32_t		nobj;		/* Number of objects */
-	uint32_t		nalloc;		/* Allocations */
-	uint32_t		nfixed;		/* How many fixed objects */
-
-	/* Only for open segment */
-	struct smp_object	*objs;		/* objdesc array */
-	struct smp_signctx	ctx[1];
-};
-
-VTAILQ_HEAD(smp_seghead, smp_seg);
-
-struct smp_sc {
-	unsigned		magic;
-#define SMP_SC_MAGIC		0x7b73af0a
-	struct stevedore	*parent;
-
-	unsigned		flags;
-#define SMP_SC_LOADED		(1 << 0)
-
-	const struct stevedore	*stevedore;
-	int			fd;
-	const char		*filename;
-	off_t			mediasize;
-	uintptr_t		align;
-	uint32_t		granularity;
-	uint32_t		unique;
-
-	uint8_t			*base;
-
-	struct smp_ident	*ident;
-
-	struct smp_seghead	segments;
-	struct smp_seg		*cur_seg;
-	uint64_t		next_bot;	/* next alloc address bottom */
-	uint64_t		next_top;	/* next alloc address top */
-
-	uint64_t		free_offset;
-
-	pthread_t		thread;
-
-	VTAILQ_ENTRY(smp_sc)	list;
-
-	struct smp_signctx	idn;
-	struct smp_signctx	ban1;
-	struct smp_signctx	ban2;
-	struct smp_signctx	seg1;
-	struct smp_signctx	seg2;
-
-	struct ban		*tailban;
-
-	struct lock		mtx;
-
-	/* Cleaner metrics */
-
-	unsigned		min_nseg;
-	unsigned		aim_nseg;
-	unsigned		max_nseg;
-
-	uint64_t		min_segl;
-	uint64_t		aim_segl;
-	uint64_t		max_segl;
-
-	uint64_t		free_reserve;
-};
-
-/*--------------------------------------------------------------------*/
-
-/* Pointer round up/down & assert */
-#define PRNDN(sc, x)	((void*)RDN2((uintptr_t)(x), sc->align))
-#define PRNUP(sc, x)	((void*)RUP2((uintptr_t)(x), sc->align))
-#define PASSERTALIGN(sc, x)	assert(PRNDN(sc, x) == (x))
-
-/* Integer round up/down & assert */
-#define IRNDN(sc, x)	RDN2(x, sc->align)
-#define IRNUP(sc, x)	RUP2(x, sc->align)
-#define IASSERTALIGN(sc, x)	assert(IRNDN(sc, x) == (x))
-
-/*--------------------------------------------------------------------*/
-
-#define ASSERT_PTR_IN_SILO(sc, ptr) \
-	assert((const void*)(ptr) >= (const void*)((sc)->base) && \
-	    (const void*)(ptr) < (const void *)((sc)->base + (sc)->mediasize))
-
-/*--------------------------------------------------------------------*/
-
-#define SIGN_DATA(ctx)	((void *)((ctx)->ss + 1))
-#define SIGN_END(ctx)	((void *)((int8_t *)SIGN_DATA(ctx) + (ctx)->ss->length))
-
-/* storage_persistent_mgt.c */
-
-void smp_mgt_init(struct stevedore *parent, int ac, char * const *av);
-
-/* storage_persistent_silo.c */
-
-void smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
-    struct smp_seg *sg);
-void smp_new_seg(struct smp_sc *sc);
-void smp_close_seg(struct smp_sc *sc, struct smp_seg *sg);
-void smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx);
-void smp_save_segs(struct smp_sc *sc);
-
-/* storage_persistent_subr.c */
-
-void smp_def_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
-    uint64_t off, const char *id);
-int smp_chk_sign(struct smp_signctx *ctx);
-void smp_append_sign(struct smp_signctx *ctx, const void *ptr, uint32_t len);
-void smp_reset_sign(struct smp_signctx *ctx);
-void smp_sync_sign(const struct smp_signctx *ctx);
-void smp_newsilo(struct smp_sc *sc);
-int smp_valid_silo(struct smp_sc *sc);
-
-/*--------------------------------------------------------------------
- * Caculate payload of some stuff
- */
-
-static inline uint64_t
-smp_stuff_len(const struct smp_sc *sc, unsigned stuff)
-{
-	uint64_t l;
-
-	assert(stuff < SMP_END_STUFF);
-	l = sc->ident->stuff[stuff + 1] - sc->ident->stuff[stuff];
-	l -= SMP_SIGN_SPACE;
-	return (l);
-}
-
-static inline uint64_t
-smp_segend(const struct smp_seg *sg)
-{
-
-	return (sg->p.offset + sg->p.length);
-}
-
-static inline uint64_t
-smp_spaceleft(const struct smp_sc *sc, const struct smp_seg *sg)
-{
-
-	IASSERTALIGN(sc, sc->next_bot);
-	assert(sc->next_bot <= sc->next_top - IRNUP(sc, SMP_SIGN_SPACE));
-	assert(sc->next_bot >= sg->p.offset);
-	assert(sc->next_top < sg->p.offset + sg->p.length);
-	return ((sc->next_top - sc->next_bot) - IRNUP(sc, SMP_SIGN_SPACE));
-}
diff --git a/bin/varnishd/storage_persistent_mgt.c b/bin/varnishd/storage_persistent_mgt.c
deleted file mode 100644
index 2cdcc6b..0000000
--- a/bin/varnishd/storage_persistent_mgt.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Persistent storage method
- *
- * XXX: Before we start the client or maybe after it stops, we should give the
- * XXX: stevedores a chance to examine their storage for consistency.
- *
- * XXX: Do we ever free the LRU-lists ?
- */
-
-#include "config.h"
-
-#include <sys/mman.h>
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "vsha256.h"
-
-#include "persistent.h"
-#include "storage_persistent.h"
-
-#ifndef MAP_NOCORE
-#define MAP_NOCORE 0 /* XXX Linux */
-#endif
-
-#ifndef MAP_NOSYNC
-#define MAP_NOSYNC 0 /* XXX Linux */
-#endif
-
-/*--------------------------------------------------------------------
- * Calculate cleaner metrics from silo dimensions
- */
-
-static void
-smp_metrics(struct smp_sc *sc)
-{
-
-	/*
-	 * We do not want to loose too big chunks of the silos
-	 * content when we are forced to clean a segment.
-	 *
-	 * For now insist that a segment covers no more than 1% of the silo.
-	 *
-	 * XXX: This should possibly depend on the size of the silo so
-	 * XXX: trivially small silos do not run into trouble along
-	 * XXX: the lines of "one object per segment".
-	 */
-
-	sc->min_nseg = 10;
-	sc->max_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->min_nseg;
-
-	fprintf(stderr, "min_nseg = %u, max_segl = %ju\n",
-	    sc->min_nseg, (uintmax_t)sc->max_segl);
-
-	/*
-	 * The number of segments are limited by the size of the segment
-	 * table(s) and from that follows the minimum size of a segmement.
-	 */
-
-	sc->max_nseg = smp_stuff_len(sc, SMP_SEG1_STUFF) / sc->min_nseg;
-	sc->min_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->max_nseg;
-
-	while (sc->min_segl < sizeof(struct object)) {
-		sc->max_nseg /= 2;
-		sc->min_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->max_nseg;
-	}
-
-	fprintf(stderr, "max_nseg = %u, min_segl = %ju\n",
-	    sc->max_nseg, (uintmax_t)sc->min_segl);
-
-	/*
-	 * Set our initial aim point at the exponential average of the
-	 * two extremes.
-	 *
-	 * XXX: This is a pretty arbitrary choice, but having no idea
-	 * XXX: object count, size distribution or ttl pattern at this
-	 * XXX: point, we have to do something.
-	 */
-
-	sc->aim_nseg =
-	   (unsigned) exp((log(sc->min_nseg) + log(sc->max_nseg))*.5);
-	sc->aim_segl = smp_stuff_len(sc, SMP_SPC_STUFF) / sc->aim_nseg;
-
-	fprintf(stderr, "aim_nseg = %u, aim_segl = %ju\n",
-	    sc->aim_nseg, (uintmax_t)sc->aim_segl);
-
-	/*
-	 * How much space in the free reserve pool ?
-	 */
-	sc->free_reserve = sc->aim_segl * 10;
-
-	fprintf(stderr, "free_reserve = %ju\n", (uintmax_t)sc->free_reserve);
-}
-
-/*--------------------------------------------------------------------
- * Set up persistent storage silo in the master process.
- */
-
-void
-smp_mgt_init(struct stevedore *parent, int ac, char * const *av)
-{
-	struct smp_sc		*sc;
-	struct smp_sign		sgn;
-	void *target;
-	int i;
-
-	ASSERT_MGT();
-
-	AZ(av[ac]);
-#define SIZOF(foo)       fprintf(stderr, \
-    "sizeof(%s) = %zu = 0x%zx\n", #foo, sizeof(foo), sizeof(foo));
-	SIZOF(struct smp_ident);
-	SIZOF(struct smp_sign);
-	SIZOF(struct smp_segptr);
-	SIZOF(struct smp_object);
-#undef SIZOF
-
-	/* See comments in persistent.h */
-	assert(sizeof(struct smp_ident) == SMP_IDENT_SIZE);
-
-	/* Allocate softc */
-	ALLOC_OBJ(sc, SMP_SC_MAGIC);
-	XXXAN(sc);
-	sc->parent = parent;
-	sc->fd = -1;
-	VTAILQ_INIT(&sc->segments);
-
-	/* Argument processing */
-	if (ac != 2)
-		ARGV_ERR("(-spersistent) wrong number of arguments\n");
-
-	i = STV_GetFile(av[0], &sc->fd, &sc->filename, "-spersistent");
-	if (i == 2)
-		ARGV_ERR("(-spersistent) need filename (not directory)\n");
-
-	sc->align = sizeof(void*) * 2;
-	sc->granularity = getpagesize();
-	sc->mediasize = STV_FileSize(sc->fd, av[1], &sc->granularity,
-	    "-spersistent");
-
-	AZ(ftruncate(sc->fd, sc->mediasize));
-
-	/* Try to determine correct mmap address */
-	i = read(sc->fd, &sgn, sizeof sgn);
-	assert(i == sizeof sgn);
-	if (!strcmp(sgn.ident, "SILO"))
-		target = (void*)(uintptr_t)sgn.mapped;
-	else
-		target = NULL;
-
-	sc->base = mmap(target, sc->mediasize, PROT_READ|PROT_WRITE,
-	    MAP_NOCORE | MAP_NOSYNC | MAP_SHARED, sc->fd, 0);
-
-	if (sc->base == MAP_FAILED)
-		ARGV_ERR("(-spersistent) failed to mmap (%s)\n",
-		    strerror(errno));
-
-	smp_def_sign(sc, &sc->idn, 0, "SILO");
-	sc->ident = SIGN_DATA(&sc->idn);
-
-	i = smp_valid_silo(sc);
-	if (i) {
-		printf("Warning SILO (%s) not reloaded (reason=%d)\n",
-		    sc->filename, i);
-		smp_newsilo(sc);
-	}
-	AZ(smp_valid_silo(sc));
-
-	smp_metrics(sc);
-
-	parent->priv = sc;
-
-	/* XXX: only for sendfile I guess... */
-	mgt_child_inherit(sc->fd, "storage_persistent");
-}
diff --git a/bin/varnishd/storage_persistent_silo.c b/bin/varnishd/storage_persistent_silo.c
deleted file mode 100644
index 8209613..0000000
--- a/bin/varnishd/storage_persistent_silo.c
+++ /dev/null
@@ -1,524 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Persistent storage method
- *
- * XXX: Before we start the client or maybe after it stops, we should give the
- * XXX: stevedores a chance to examine their storage for consistency.
- *
- */
-
-#include "config.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "hash_slinger.h"
-#include "vsha256.h"
-#include "vtim.h"
-
-#include "persistent.h"
-#include "storage_persistent.h"
-
-/*--------------------------------------------------------------------
- * Write the segmentlist back to the silo.
- *
- * We write the first copy, sync it synchronously, then write the
- * second copy and sync it synchronously.
- *
- * Provided the kernel doesn't lie, that means we will always have
- * at least one valid copy on in the silo.
- */
-
-static void
-smp_save_seg(const struct smp_sc *sc, struct smp_signctx *ctx)
-{
-	struct smp_segptr *ss;
-	struct smp_seg *sg;
-	uint64_t length;
-
-	Lck_AssertHeld(&sc->mtx);
-	smp_reset_sign(ctx);
-	ss = SIGN_DATA(ctx);
-	length = 0;
-	VTAILQ_FOREACH(sg, &sc->segments, list) {
-		assert(sg->p.offset < sc->mediasize);
-		assert(sg->p.offset + sg->p.length <= sc->mediasize);
-		*ss = sg->p;
-		ss++;
-		length += sizeof *ss;
-	}
-	smp_append_sign(ctx, SIGN_DATA(ctx), length);
-	smp_sync_sign(ctx);
-}
-
-void
-smp_save_segs(struct smp_sc *sc)
-{
-	struct smp_seg *sg, *sg2;
-
-	Lck_AssertHeld(&sc->mtx);
-
-	/*
-	 * Remove empty segments from the front of the list
-	 * before we write the segments to disk.
-	 */
-	VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
-		if (sg->nobj > 0)
-			break;
-		if (sg == sc->cur_seg)
-			continue;
-		VTAILQ_REMOVE(&sc->segments, sg, list);
-		LRU_Free(sg->lru);
-		FREE_OBJ(sg);
-	}
-	smp_save_seg(sc, &sc->seg1);
-	smp_save_seg(sc, &sc->seg2);
-}
-
-/*--------------------------------------------------------------------
- * Load segments
- *
- * The overall objective is to register the existence of an object, based
- * only on the minimally sized struct smp_object, without causing the
- * main object to be faulted in.
- *
- * XXX: We can test this by mprotecting the main body of the segment
- * XXX: until the first fixup happens, or even just over this loop,
- * XXX: However: the requires that the smp_objects starter further
- * XXX: into the segment than a page so that they do not get hit
- * XXX: by the protection.
- */
-
-void
-smp_load_seg(const struct sess *sp, const struct smp_sc *sc,
-    struct smp_seg *sg)
-{
-	struct smp_object *so;
-	struct objcore *oc;
-	uint32_t no;
-	double t_now = VTIM_real();
-	struct smp_signctx ctx[1];
-
-	ASSERT_SILO_THREAD(sc);
-	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
-	CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
-	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-	assert(sg->flags & SMP_SEG_MUSTLOAD);
-	sg->flags &= ~SMP_SEG_MUSTLOAD;
-	AN(sg->p.offset);
-	if (sg->p.objlist == 0)
-		return;
-	smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
-	if (smp_chk_sign(ctx))
-		return;
-
-	/* test SEGTAIL */
-	/* test OBJIDX */
-	so = (void*)(sc->base + sg->p.objlist);
-	sg->objs = so;
-	no = sg->p.lobjlist;
-	/* Clear the bogus "hold" count */
-	sg->nobj = 0;
-	for (;no > 0; so++,no--) {
-		if (so->ttl == 0 || so->ttl < t_now)
-			continue;
-		HSH_Prealloc(sp);
-		oc = sp->wrk->nobjcore;
-		oc->flags |= OC_F_NEEDFIXUP | OC_F_LRUDONTMOVE;
-		oc->flags &= ~OC_F_BUSY;
-		smp_init_oc(oc, sg, no);
-		oc->ban = BAN_RefBan(oc, so->ban, sc->tailban);
-		memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
-		(void)HSH_Insert(sp);
-		AZ(sp->wrk->nobjcore);
-		EXP_Inject(oc, sg->lru, so->ttl);
-		sg->nobj++;
-	}
-	WRK_SumStat(sp->wrk);
-	sg->flags |= SMP_SEG_LOADED;
-}
-
-/*--------------------------------------------------------------------
- * Create a new segment
- */
-
-void
-smp_new_seg(struct smp_sc *sc)
-{
-	struct smp_seg *sg, *sg2;
-
-	Lck_AssertHeld(&sc->mtx);
-	ALLOC_OBJ(sg, SMP_SEG_MAGIC);
-	AN(sg);
-	sg->sc = sc;
-	sg->lru = LRU_Alloc();
-	CHECK_OBJ_NOTNULL(sg->lru, LRU_MAGIC);
-
-	/* XXX: find where it goes in silo */
-
-	sg->p.offset = sc->free_offset;
-	// XXX: align */
-	assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
-	assert(sg->p.offset < sc->mediasize);
-
-	sg->p.length = sc->aim_segl;
-	sg->p.length &= ~7;
-
-	if (smp_segend(sg) > sc->mediasize) {
-		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
-		sg->p.offset = sc->free_offset;
-		sg2 = VTAILQ_FIRST(&sc->segments);
-		if (smp_segend(sg) > sg2->p.offset) {
-			printf("Out of space in persistent silo\n");
-			printf("Committing suicide, restart will make space\n");
-			exit (0);
-		}
-	}
-
-
-	assert(smp_segend(sg) <= sc->mediasize);
-
-	sg2 = VTAILQ_FIRST(&sc->segments);
-	if (sg2 != NULL && sg2->p.offset > sc->free_offset) {
-		if (smp_segend(sg) > sg2->p.offset) {
-			printf("Out of space in persistent silo\n");
-			printf("Committing suicide, restart will make space\n");
-			exit (0);
-		}
-		assert(smp_segend(sg) <= sg2->p.offset);
-	}
-
-	sg->p.offset = IRNUP(sc, sg->p.offset);
-	sg->p.length = IRNDN(sc, sg->p.length);
-	sc->free_offset = sg->p.offset + sg->p.length;
-
-	VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
-
-	/* Neuter the new segment in case there is an old one there */
-	AN(sg->p.offset);
-	smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Set up our allocation points */
-	sc->cur_seg = sg;
-	sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
-	sc->next_top = smp_segend(sg);
-	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
-	IASSERTALIGN(sc, sc->next_bot);
-	IASSERTALIGN(sc, sc->next_top);
-	sg->objs = (void*)(sc->base + sc->next_top);
-}
-
-/*--------------------------------------------------------------------
- * Close a segment
- */
-
-void
-smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
-{
-	uint64_t left, dst, len;
-	void *dp;
-
-	Lck_AssertHeld(&sc->mtx);
-
-	assert(sg == sc->cur_seg);
-	AN(sg->p.offset);
-	sc->cur_seg = NULL;
-
-	if (sg->nalloc == 0) {
-		/* XXX: if segment is empty, delete instead */
-		VTAILQ_REMOVE(&sc->segments, sg, list);
-		free(sg);
-		return;
-	}
-
-	/*
-	 * If there is enough space left, that we can move the smp_objects
-	 * down without overwriting the present copy, we will do so to
-	 * compact the segment.
-	 */
-	left = smp_spaceleft(sc, sg);
-	len = sizeof(struct smp_object) * sg->p.lobjlist;
-	if (len < left) {
-		dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
-		dp = sc->base + dst;
-		assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
-		memcpy(dp, sg->objs, len);
-		sc->next_top = dst;
-		sg->objs = dp;
-		sg->p.length = (sc->next_top - sg->p.offset)
-		     + len + IRNUP(sc, SMP_SIGN_SPACE);
-		(void)smp_spaceleft(sc, sg);	/* for the asserts */
-
-	}
-
-	/* Update the segment header */
-	sg->p.objlist = sc->next_top;
-
-	/* Write the (empty) OBJIDX signature */
-	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
-	assert(sc->next_top >= sc->next_bot);
-	smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Write the (empty) SEGTAIL signature */
-	smp_def_sign(sc, sg->ctx,
-	    sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Save segment list */
-	smp_save_segs(sc);
-	sc->free_offset = smp_segend(sg);
-}
-
-
-/*---------------------------------------------------------------------
- */
-
-static struct smp_object *
-smp_find_so(const struct smp_seg *sg, unsigned priv2)
-{
-	struct smp_object *so;
-
-	assert(priv2 > 0);
-	assert(priv2 <= sg->p.lobjlist);
-	so = &sg->objs[sg->p.lobjlist - priv2];
-	return (so);
-}
-
-/*---------------------------------------------------------------------
- * Check if a given storage structure is valid to use
- */
-
-static int
-smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
-    const struct storage *st)
-{
-	struct smp_seg *sg2;
-	const uint8_t *pst;
-	uint64_t o;
-
-	(void)sg;		/* XXX: faster: Start search from here */
-	pst = (const void *)st;
-
-	if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
-		return (0x01);		/* Before silo payload start */
-	if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
-		return (0x02);		/* After silo end */
-
-	o = pst - sc->base;
-
-	/* Find which segment contains the storage structure */
-	VTAILQ_FOREACH(sg2, &sc->segments, list)
-		if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
-			break;
-	if (sg2 == NULL)
-		return (0x04);		/* No claiming segment */
-	if (!(sg2->flags & SMP_SEG_LOADED))
-		return (0x08);		/* Claiming segment not loaded */
-
-	/* It is now safe to access the storage structure */
-	if (st->magic != STORAGE_MAGIC)
-		return (0x10);		/* Not enough magic */
-
-	if (o + st->space >= sg2->p.objlist)
-		return (0x20);		/* Allocation not inside segment */
-
-	if (st->len > st->space)
-		return (0x40);		/* Plain bad... */
-
-	/*
-	 * XXX: We could patch up st->stevedore and st->priv here
-	 * XXX: but if things go right, we will never need them.
-	 */
-	return (0);
-}
-
-/*---------------------------------------------------------------------
- * objcore methods for persistent objects
- */
-
-static struct object *
-smp_oc_getobj(struct worker *wrk, struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	struct storage *st;
-	uint64_t l;
-	int bad;
-
-	/* Some calls are direct, but they should match anyway */
-	assert(oc->methods->getobj == smp_oc_getobj);
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	if (wrk == NULL)
-		AZ(oc->flags & OC_F_NEEDFIXUP);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc->priv2);
-
-	o = (void*)(sg->sc->base + so->ptr);
-	/*
-	 * The object may not be in this segment since we allocate it
-	 * In a separate operation than the smp_object.  We could check
-	 * that it is in a later segment, but that would be complicated.
-	 * XXX: For now, be happy if it is inside th silo
-	 */
-	ASSERT_PTR_IN_SILO(sg->sc, o);
-	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
-
-	/*
-	 * If this flag is not set, it will not be, and the lock is not
-	 * needed to test it.
-	 */
-	if (!(oc->flags & OC_F_NEEDFIXUP))
-		return (o);
-
-	AN(wrk);
-	Lck_Lock(&sg->sc->mtx);
-	/* Check again, we might have raced. */
-	if (oc->flags & OC_F_NEEDFIXUP) {
-		/* We trust caller to have a refcnt for us */
-		o->objcore = oc;
-
-		bad = 0;
-		l = 0;
-		VTAILQ_FOREACH(st, &o->store, list) {
-			bad |= smp_loaded_st(sg->sc, sg, st);
-			if (bad)
-				break;
-			l += st->len;
-		}
-		if (l != o->len)
-			bad |= 0x100;
-
-		if(bad) {
-			EXP_Set_ttl(&o->exp, -1);
-			so->ttl = 0;
-		}
-
-		sg->nfixed++;
-		wrk->stats.n_object++;
-		wrk->stats.n_vampireobject--;
-		oc->flags &= ~OC_F_NEEDFIXUP;
-	}
-	Lck_Unlock(&sg->sc->mtx);
-	EXP_Rearm(o);
-	return (o);
-}
-
-static void
-smp_oc_updatemeta(struct objcore *oc)
-{
-	struct object *o;
-	struct smp_seg *sg;
-	struct smp_object *so;
-	double mttl;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	o = smp_oc_getobj(NULL, oc);
-	AN(o);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
-	so = smp_find_so(sg, oc->priv2);
-
-	mttl = EXP_Grace(NULL, o);
-
-	if (sg == sg->sc->cur_seg) {
-		/* Lock necessary, we might race close_seg */
-		Lck_Lock(&sg->sc->mtx);
-		so->ban = BAN_Time(oc->ban);
-		so->ttl = mttl;
-		Lck_Unlock(&sg->sc->mtx);
-	} else {
-		so->ban = BAN_Time(oc->ban);
-		so->ttl = mttl;
-	}
-}
-
-static void __match_proto__()
-smp_oc_freeobj(struct objcore *oc)
-{
-	struct smp_seg *sg;
-	struct smp_object *so;
-
-	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	so = smp_find_so(sg, oc->priv2);
-
-	Lck_Lock(&sg->sc->mtx);
-	so->ttl = 0;
-	so->ptr = 0;
-
-	assert(sg->nobj > 0);
-	assert(sg->nfixed > 0);
-	sg->nobj--;
-	sg->nfixed--;
-
-	Lck_Unlock(&sg->sc->mtx);
-}
-
-/*--------------------------------------------------------------------
- * Find the per-segment lru list for this object
- */
-
-static struct lru *
-smp_oc_getlru(const struct objcore *oc)
-{
-	struct smp_seg *sg;
-
-	CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
-	return (sg->lru);
-}
-
-static struct objcore_methods smp_oc_methods = {
-	.getobj =		smp_oc_getobj,
-	.updatemeta =		smp_oc_updatemeta,
-	.freeobj =		smp_oc_freeobj,
-	.getlru =		smp_oc_getlru,
-};
-
-/*--------------------------------------------------------------------*/
-
-void
-smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
-{
-
-	oc->priv = sg;
-	oc->priv2 = objidx;
-	oc->methods = &smp_oc_methods;
-}
diff --git a/bin/varnishd/storage_persistent_subr.c b/bin/varnishd/storage_persistent_subr.c
deleted file mode 100644
index b4bbb3f..0000000
--- a/bin/varnishd/storage_persistent_subr.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Persistent storage method
- *
- * XXX: Before we start the client or maybe after it stops, we should give the
- * XXX: stevedores a chance to examine their storage for consistency.
- *
- * XXX: Do we ever free the LRU-lists ?
- */
-
-#include "config.h"
-
-#include <sys/mman.h>
-
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-#include "vsha256.h"
-
-#include "persistent.h"
-#include "storage_persistent.h"
-
-/*--------------------------------------------------------------------
- * SIGNATURE functions
- * The signature is SHA256 over:
- *    1. The smp_sign struct up to but not including the length field.
- *    2. smp_sign->length bytes, starting after the smp_sign structure
- *    3. The smp-sign->length field.
- * The signature is stored after the byte-range from step 2.
- */
-
-/*--------------------------------------------------------------------
- * Define a signature by location and identifier.
- */
-
-void
-smp_def_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
-    uint64_t off, const char *id)
-{
-
-	AZ(off & 7);			/* Alignment */
-	assert(strlen(id) < sizeof ctx->ss->ident);
-
-	memset(ctx, 0, sizeof *ctx);
-	ctx->ss = (void*)(sc->base + off);
-	ctx->unique = sc->unique;
-	ctx->id = id;
-}
-
-/*--------------------------------------------------------------------
- * Check that a signature is good, leave state ready for append
- */
-int
-smp_chk_sign(struct smp_signctx *ctx)
-{
-	struct SHA256Context cx;
-	unsigned char sign[SHA256_LEN];
-	int r = 0;
-
-	if (strncmp(ctx->id, ctx->ss->ident, sizeof ctx->ss->ident))
-		r = 1;
-	else if (ctx->unique != ctx->ss->unique)
-		r = 2;
-	else if ((uintptr_t)ctx->ss != ctx->ss->mapped)
-		r = 3;
-	else {
-		SHA256_Init(&ctx->ctx);
-		SHA256_Update(&ctx->ctx, ctx->ss,
-		    offsetof(struct smp_sign, length));
-		SHA256_Update(&ctx->ctx, SIGN_DATA(ctx), ctx->ss->length);
-		cx = ctx->ctx;
-		SHA256_Update(&cx, &ctx->ss->length, sizeof(ctx->ss->length));
-		SHA256_Final(sign, &cx);
-		if (memcmp(sign, SIGN_END(ctx), sizeof sign))
-			r = 4;
-	}
-	if (r) {
-		fprintf(stderr, "CHK(%p %s %p %s) = %d\n",
-		    ctx, ctx->id, ctx->ss,
-		    r > 1 ? ctx->ss->ident : "<invalid>", r);
-	}
-	return (r);
-}
-
-/*--------------------------------------------------------------------
- * Append data to a signature
- */
-void
-smp_append_sign(struct smp_signctx *ctx, const void *ptr, uint32_t len)
-{
-	struct SHA256Context cx;
-	unsigned char sign[SHA256_LEN];
-
-	if (len != 0) {
-		SHA256_Update(&ctx->ctx, ptr, len);
-		ctx->ss->length += len;
-	}
-	cx = ctx->ctx;
-	SHA256_Update(&cx, &ctx->ss->length, sizeof(ctx->ss->length));
-	SHA256_Final(sign, &cx);
-	memcpy(SIGN_END(ctx), sign, sizeof sign);
-XXXAZ(smp_chk_sign(ctx));
-}
-
-/*--------------------------------------------------------------------
- * Reset a signature to empty, prepare for appending.
- */
-
-void
-smp_reset_sign(struct smp_signctx *ctx)
-{
-
-	memset(ctx->ss, 0, sizeof *ctx->ss);
-	strcpy(ctx->ss->ident, ctx->id);
-	ctx->ss->unique = ctx->unique;
-	ctx->ss->mapped = (uintptr_t)ctx->ss;
-	SHA256_Init(&ctx->ctx);
-	SHA256_Update(&ctx->ctx, ctx->ss,
-	    offsetof(struct smp_sign, length));
-	smp_append_sign(ctx, NULL, 0);
-}
-
-/*--------------------------------------------------------------------
- * Force a write of a signature block to the backing store.
- */
-
-void
-smp_sync_sign(const struct smp_signctx *ctx)
-{
-	int i;
-
-	/* XXX: round to pages */
-	i = msync((void*)ctx->ss, ctx->ss->length + SHA256_LEN, MS_SYNC);
-	if (i && 0)
-		fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
-		    ctx->ss, ctx->id, i, strerror(errno));
-}
-
-/*--------------------------------------------------------------------
- * Create and force a new signature to backing store
- */
-
-static void
-smp_new_sign(const struct smp_sc *sc, struct smp_signctx *ctx,
-    uint64_t off, const char *id)
-{
-	smp_def_sign(sc, ctx, off, id);
-	smp_reset_sign(ctx);
-	smp_sync_sign(ctx);
-}
-
-/*--------------------------------------------------------------------
- * Initialize a Silo with a valid but empty structure.
- *
- * XXX: more intelligent sizing of things.
- */
-
-void
-smp_newsilo(struct smp_sc *sc)
-{
-	struct smp_ident	*si;
-
-	ASSERT_MGT();
-	assert(strlen(SMP_IDENT_STRING) < sizeof si->ident);
-
-	/* Choose a new random number */
-	sc->unique = random();
-
-	smp_reset_sign(&sc->idn);
-	si = sc->ident;
-
-	memset(si, 0, sizeof *si);
-	strcpy(si->ident, SMP_IDENT_STRING);
-	si->byte_order = 0x12345678;
-	si->size = sizeof *si;
-	si->major_version = 2;
-	si->unique = sc->unique;
-	si->mediasize = sc->mediasize;
-	si->granularity = sc->granularity;
-	/*
-	 * Aim for cache-line-width
-	 */
-	si->align = sizeof(void*) * 2;
-	sc->align = si->align;
-
-	si->stuff[SMP_BAN1_STUFF] = sc->granularity;
-	si->stuff[SMP_BAN2_STUFF] = si->stuff[SMP_BAN1_STUFF] + 1024*1024;
-	si->stuff[SMP_SEG1_STUFF] = si->stuff[SMP_BAN2_STUFF] + 1024*1024;
-	si->stuff[SMP_SEG2_STUFF] = si->stuff[SMP_SEG1_STUFF] + 1024*1024;
-	si->stuff[SMP_SPC_STUFF] = si->stuff[SMP_SEG2_STUFF] + 1024*1024;
-	si->stuff[SMP_END_STUFF] = si->mediasize;
-	assert(si->stuff[SMP_SPC_STUFF] < si->stuff[SMP_END_STUFF]);
-
-	smp_new_sign(sc, &sc->ban1, si->stuff[SMP_BAN1_STUFF], "BAN 1");
-	smp_new_sign(sc, &sc->ban2, si->stuff[SMP_BAN2_STUFF], "BAN 2");
-	smp_new_sign(sc, &sc->seg1, si->stuff[SMP_SEG1_STUFF], "SEG 1");
-	smp_new_sign(sc, &sc->seg2, si->stuff[SMP_SEG2_STUFF], "SEG 2");
-
-	smp_append_sign(&sc->idn, si, sizeof *si);
-	smp_sync_sign(&sc->idn);
-}
-
-/*--------------------------------------------------------------------
- * Check if a silo is valid.
- */
-
-int
-smp_valid_silo(struct smp_sc *sc)
-{
-	struct smp_ident	*si;
-	int i, j;
-
-	assert(strlen(SMP_IDENT_STRING) < sizeof si->ident);
-
-	i = smp_chk_sign(&sc->idn);
-	if (i)
-		return (i);
-
-	si = sc->ident;
-	if (strcmp(si->ident, SMP_IDENT_STRING))
-		return (12);
-	if (si->byte_order != 0x12345678)
-		return (13);
-	if (si->size != sizeof *si)
-		return (14);
-	if (si->major_version != 2)
-		return (15);
-	if (si->mediasize != sc->mediasize)
-		return (17);
-	if (si->granularity != sc->granularity)
-		return (18);
-	if (si->align < sizeof(void*))
-		return (19);
-	if (!PWR2(si->align))
-		return (20);
-	sc->align = si->align;
-	sc->unique = si->unique;
-
-	/* XXX: Sanity check stuff[6] */
-
-	assert(si->stuff[SMP_BAN1_STUFF] > sizeof *si + SHA256_LEN);
-	assert(si->stuff[SMP_BAN2_STUFF] > si->stuff[SMP_BAN1_STUFF]);
-	assert(si->stuff[SMP_SEG1_STUFF] > si->stuff[SMP_BAN2_STUFF]);
-	assert(si->stuff[SMP_SEG2_STUFF] > si->stuff[SMP_SEG1_STUFF]);
-	assert(si->stuff[SMP_SPC_STUFF] > si->stuff[SMP_SEG2_STUFF]);
-	assert(si->stuff[SMP_END_STUFF] == sc->mediasize);
-
-	assert(smp_stuff_len(sc, SMP_SEG1_STUFF) > 65536);
-	assert(smp_stuff_len(sc, SMP_SEG1_STUFF) ==
-	  smp_stuff_len(sc, SMP_SEG2_STUFF));
-
-	assert(smp_stuff_len(sc, SMP_BAN1_STUFF) > 65536);
-	assert(smp_stuff_len(sc, SMP_BAN1_STUFF) ==
-	  smp_stuff_len(sc, SMP_BAN2_STUFF));
-
-	smp_def_sign(sc, &sc->ban1, si->stuff[SMP_BAN1_STUFF], "BAN 1");
-	smp_def_sign(sc, &sc->ban2, si->stuff[SMP_BAN2_STUFF], "BAN 2");
-	smp_def_sign(sc, &sc->seg1, si->stuff[SMP_SEG1_STUFF], "SEG 1");
-	smp_def_sign(sc, &sc->seg2, si->stuff[SMP_SEG2_STUFF], "SEG 2");
-
-	/* We must have one valid BAN table */
-	i = smp_chk_sign(&sc->ban1);
-	j = smp_chk_sign(&sc->ban2);
-	if (i && j)
-		return (100 + i * 10 + j);
-
-	/* We must have one valid SEG table */
-	i = smp_chk_sign(&sc->seg1);
-	j = smp_chk_sign(&sc->seg2);
-	if (i && j)
-		return (200 + i * 10 + j);
-	return (0);
-}
diff --git a/bin/varnishd/storage_synth.c b/bin/varnishd/storage_synth.c
deleted file mode 100644
index 5df2c08..0000000
--- a/bin/varnishd/storage_synth.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*-
- * Copyright (c) 2008-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Storage method for synthetic content, based on vsb.
- */
-
-#include "config.h"
-
-#include <stdlib.h>
-
-#include "cache.h"
-#include "storage.h"
-
-
-static struct lock		sms_mtx;
-
-static void
-sms_free(struct storage *sto)
-{
-
-	CHECK_OBJ_NOTNULL(sto, STORAGE_MAGIC);
-	Lck_Lock(&sms_mtx);
-	VSC_C_main->sms_nobj--;
-	VSC_C_main->sms_nbytes -= sto->len;
-	VSC_C_main->sms_bfree += sto->len;
-	Lck_Unlock(&sms_mtx);
-	VSB_delete(sto->priv);
-	free(sto);
-}
-
-void
-SMS_Init(void)
-{
-
-	Lck_New(&sms_mtx, lck_sms);
-}
-
-static struct stevedore sms_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"synth",
-	.free	=	sms_free,
-};
-
-struct vsb *
-SMS_Makesynth(struct object *obj)
-{
-	struct storage *sto;
-	struct vsb *vsb;
-
-	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
-	STV_Freestore(obj);
-	obj->len = 0;
-
-	Lck_Lock(&sms_mtx);
-	VSC_C_main->sms_nreq++;
-	VSC_C_main->sms_nobj++;
-	Lck_Unlock(&sms_mtx);
-
-	sto = calloc(sizeof *sto, 1);
-	XXXAN(sto);
-	vsb = VSB_new_auto();
-	XXXAN(vsb);
-	sto->priv = vsb;
-	sto->len = 0;
-	sto->space = 0;
-#ifdef SENDFILE_WORKS
-	sto->fd = -1;
-#endif
-	sto->stevedore = &sms_stevedore;
-	sto->magic = STORAGE_MAGIC;
-
-	VTAILQ_INSERT_TAIL(&obj->store, sto, list);
-	return (vsb);
-}
-
-void
-SMS_Finish(struct object *obj)
-{
-	struct storage *sto;
-	struct vsb *vsb;
-
-	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
-	sto = VTAILQ_FIRST(&obj->store);
-	assert(sto->stevedore == &sms_stevedore);
-	vsb = sto->priv;
-	AZ(VSB_finish(vsb));
-
-	sto->ptr = (void*)VSB_data(vsb);
-	sto->len = VSB_len(vsb);
-	sto->space = VSB_len(vsb);
-	obj->len = sto->len;
-	Lck_Lock(&sms_mtx);
-	VSC_C_main->sms_nbytes += sto->len;
-	VSC_C_main->sms_balloc += sto->len;
-	Lck_Unlock(&sms_mtx);
-}
diff --git a/bin/varnishd/storage_umem.c b/bin/varnishd/storage_umem.c
deleted file mode 100644
index 9198a99..0000000
--- a/bin/varnishd/storage_umem.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Storage method based on umem_alloc(3MALLOC)
- */
-
-#include "config.h"
-
-#ifdef HAVE_LIBUMEM
-
-#include <sys/types.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <umem.h>
-
-#include "cache.h"
-#include "storage.h"
-
-static size_t			smu_max = SIZE_MAX;
-static MTX			smu_mtx;
-
-struct smu {
-	struct storage		s;
-	size_t			sz;
-};
-
-static struct storage *
-smu_alloc(struct stevedore *st, size_t size)
-{
-	struct smu *smu;
-
-	Lck_Lock(&smu_mtx);
-	VSC_C_main->sma_nreq++;
-	if (VSC_C_main->sma_nbytes + size > smu_max)
-		size = 0;
-	else {
-		VSC_C_main->sma_nobj++;
-		VSC_C_main->sma_nbytes += size;
-		VSC_C_main->sma_balloc += size;
-	}
-	Lck_Unlock(&smu_mtx);
-
-	if (size == 0)
-		return (NULL);
-
-	smu = umem_zalloc(sizeof *smu, UMEM_DEFAULT);
-	if (smu == NULL)
-		return (NULL);
-	smu->sz = size;
-	smu->s.priv = smu;
-	smu->s.ptr = umem_alloc(size, UMEM_DEFAULT);
-	XXXAN(smu->s.ptr);
-	smu->s.len = 0;
-	smu->s.space = size;
-	smu->s.fd = -1;
-	smu->s.stevedore = st;
-	smu->s.magic = STORAGE_MAGIC;
-	return (&smu->s);
-}
-
-static void
-smu_free(struct storage *s)
-{
-	struct smu *smu;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	smu = s->priv;
-	assert(smu->sz == smu->s.space);
-	Lck_Lock(&smu_mtx);
-	VSC_C_main->sma_nobj--;
-	VSC_C_main->sma_nbytes -= smu->sz;
-	VSC_C_main->sma_bfree += smu->sz;
-	Lck_Unlock(&smu_mtx);
-	umem_free(smu->s.ptr, smu->s.space);
-	umem_free(smu, sizeof *smu);
-}
-
-static void
-smu_trim(const struct storage *s, size_t size)
-{
-	struct smu *smu;
-	void *p;
-
-	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
-	smu = s->priv;
-	assert(smu->sz == smu->s.space);
-	if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
-		memcpy(p, smu->s.ptr, size);
-		umem_free(smu->s.ptr, smu->s.space);
-		Lck_Lock(&smu_mtx);
-		VSC_C_main->sma_nbytes -= (smu->sz - size);
-		VSC_C_main->sma_bfree += smu->sz - size;
-		smu->sz = size;
-		Lck_Unlock(&smu_mtx);
-		smu->s.ptr = p;
-		smu->s.space = size;
-	}
-}
-
-static void
-smu_init(struct stevedore *parent, int ac, char * const *av)
-{
-	const char *e;
-	uintmax_t u;
-
-	(void)parent;
-
-	AZ(av[ac]);
-	if (ac > 1)
-		ARGV_ERR("(-sumem) too many arguments\n");
-
-	if (ac == 0 || *av[0] == '\0')
-		 return;
-
-	e = VNUM_2bytes(av[0], &u, 0);
-	if (e != NULL)
-		ARGV_ERR("(-sumem) size \"%s\": %s\n", av[0], e);
-	if ((u != (uintmax_t)(size_t)u))
-		ARGV_ERR("(-sumem) size \"%s\": too big\n", av[0]);
-	smu_max = u;
-}
-
-static void
-smu_open(const struct stevedore *st)
-{
-	(void)st;
-	AZ(pthread_mutex_init(&smu_mtx, NULL));
-}
-
-const struct stevedore smu_stevedore = {
-	.magic	=	STEVEDORE_MAGIC,
-	.name	=	"umem",
-	.init	=	smu_init,
-	.open	=	smu_open,
-	.alloc	=	smu_alloc,
-	.free	=	smu_free,
-	.trim	=	smu_trim,
-};
-
-#endif /* HAVE_UMEM_H */



More information about the varnish-commit mailing list