[experimental-ims] a18ff47 Merge master -> experimental-ims
Geoff Simmons
geoff at varnish-cache.org
Fri Feb 17 13:58:54 CET 2012
commit a18ff477361a7fc69f8777e963921fc10ede1110
Merge: 28026e2 7d164e0
Author: Geoff Simmons <geoff at uplex.de>
Date: Fri Feb 17 12:50:07 2012 +0100
Merge master -> experimental-ims
Since STV_alloc() was changed in master so that it assumes allocation
is only for busyobj->fetch_obj, there's a problem with duplicating
storage from stale_obj into the newly fetched obj: to make the copy,
STV_dup() needs to allocate storage, and it now calls stv->alloc
directly, without STV_alloc()'s logic to check for exhausted storage,
nuke if necessary, etc. This is *not* safe now.
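Concretely, the allocate-or-nuke loop that the copy path now bypasses
looks roughly like the sketch below. NUKE_LIMIT and stv_nuke_one() are
stand-in names here, not the real parameter or eviction call -- the
point is only the retry-after-eviction structure that a bare stv->alloc
call doesn't have:

/*
 * Sketch only, assuming the declarations from storage/storage.h.
 * NUKE_LIMIT and stv_nuke_one() are stand-ins for the real nuke-limit
 * parameter and LRU eviction call.
 */
#define NUKE_LIMIT 50

/* Hypothetical: evict one object from stv's LRU, return -1 if none left. */
static int stv_nuke_one(struct stevedore *stv);

static struct storage *
stv_alloc_or_nuke(struct stevedore *stv, size_t size)
{
        struct storage *st;
        int tries;

        for (tries = 0; tries < NUKE_LIMIT; tries++) {
                st = stv->alloc(stv, size);
                if (st != NULL)
                        return (st);    /* allocation succeeded */
                if (stv_nuke_one(stv) < 0)
                        break;          /* nothing left to evict */
        }
        return (NULL);                  /* storage really exhausted */
}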
All the more reason IMO to implement a solution with linking and refcounting
the storage. IMS should not be messing around with stevedores.
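To make that concrete, here is a rough sketch of the linking/refcounting
idea, with stand-in types -- none of these are current Varnish
structures, and a real version would hang the reference count off
struct storage and protect it with a lock:

/*
 * Sketch only, with stand-in types.  Each body segment carries a
 * reference count; "duplicating" an object links the same segments
 * into the target and bumps the count, and the last dereference
 * frees them.  A real version needs locking around refcnt.
 */
#include <stdlib.h>

struct segment {
        unsigned         refcnt;        /* shared by every object using the segment */
        size_t           len;
        char            *ptr;
};

struct seglink {
        struct segment  *seg;
        struct seglink  *next;
};

struct body {
        struct seglink  *segs;
        size_t           len;
};

static void
body_share(const struct body *src, struct body *target)
{
        struct seglink *sl, *nl, **tail;

        /* append after target's existing segments */
        for (tail = &target->segs; *tail != NULL; tail = &(*tail)->next)
                ;
        for (sl = src->segs; sl != NULL; sl = sl->next) {
                sl->seg->refcnt++;      /* share the segment, do not copy it */
                nl = malloc(sizeof *nl);
                if (nl == NULL)
                        abort();
                nl->seg = sl->seg;
                nl->next = NULL;
                *tail = nl;
                tail = &nl->next;
                target->len += sl->seg->len;
        }
}

static void
segment_deref(struct segment *seg)
{
        if (--seg->refcnt == 0) {       /* last user frees the storage */
                free(seg->ptr);
                free(seg);
        }
}

With something like that, the stevedore dup methods reduce to
bookkeeping, and the copy loop in default_dup() -- along with its
allocation problem -- disappears.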
diff --cc bin/varnishd/cache/cache_center.c
index 517ff3b,4b0429a..6dc84be
--- a/bin/varnishd/cache/cache_center.c
+++ b/bin/varnishd/cache/cache_center.c
@@@ -920,25 -884,6 +909,25 @@@ cnt_fetchbody(struct sess *sp, struct w
/* Use unmodified headers*/
i = FetchBody(wrk, req->obj);
+ /*
+ * If a stale_obj was found, dup its storage into the new obj,
+ * reset Content-Length from the size of the storage, and discard
+ * the stale_obj.
+ */
+ if (bo->stale_obj) {
- STV_dup(sp, bo->stale_obj, req->obj);
++ STV_dup(bo->stale_obj, req->obj);
+ assert(bo->stale_obj->len == req->obj->len);
+
+ http_Unset(req->obj->http, H_Content_Length);
+ http_PrintfHeader(req->obj->http, "Content-Length: %lu",
+ req->obj->len);
+
+ EXP_Clr(&bo->stale_obj->exp);
+ EXP_Rearm(bo->stale_obj);
+ HSH_Deref(&sp->wrk->stats, NULL, &bo->stale_obj);
+ AZ(bo->stale_obj);
+ }
+
http_Teardown(bo->bereq);
http_Teardown(bo->beresp);
bo->vfp = NULL;
diff --cc bin/varnishd/storage/stevedore.c
index 8ecab24,2fce063..46c3332
--- a/bin/varnishd/storage/stevedore.c
+++ b/bin/varnishd/storage/stevedore.c
@@@ -374,13 -379,10 +379,9 @@@ STV_Freestore(struct object *o
/*-------------------------------------------------------------------*/
struct storage *
- STV_alloc(struct worker *w, size_t size)
+ STV_alloc(struct busyobj *bo, size_t size)
{
- struct object *obj = w->busyobj->fetch_obj;
- if (obj == NULL)
- obj = w->sp->req->obj;
--
- return (stv_alloc(w, obj, size));
+ return (stv_alloc(bo, size));
}
void
@@@ -393,30 -395,6 +394,30 @@@ STV_trim(struct storage *st, size_t siz
st->stevedore->trim(st, size);
}
+/*
+ * Duplicate the object body storage from src into target, using a
+ * stevedore-specific dup method for src's stevedore.
+ *
+ * Currently, every method just copies storage from one object to the other,
+ * but this method of encapsulation opens the path to future techniques of
+ * sharing storage together with reference counting.
+ */
+void
- STV_dup(const struct sess *sp, struct object *src, struct object *target)
++STV_dup(struct object *src, struct object *target)
+{
+ struct stevedore *stv;
+
+ CHECK_OBJ_NOTNULL(src, OBJECT_MAGIC);
+ CHECK_OBJ_NOTNULL(target, OBJECT_MAGIC);
+ CHECK_OBJ_NOTNULL(src->objstore, STORAGE_MAGIC);
+ CHECK_OBJ_NOTNULL(src->objstore->stevedore, STEVEDORE_MAGIC);
+
+ stv = src->objstore->stevedore;
+ AN(stv->dup);
+
- stv->dup(sp, src, target);
++ stv->dup(src, target);
+}
+
void
STV_free(struct storage *st)
{
@@@ -501,24 -479,3 +502,35 @@@ VRT_Stv_##nm(const char *nm)
#include "tbl/vrt_stv_var.h"
#undef VRTSTVVAR
+
+/*
+ * Default object store dup just copies the storage.
+ */
+void
- default_dup(const struct sess *sp, struct object *src, struct object *target)
++default_dup(struct object *src, struct object *target)
+{
+ struct storage *st, *st2;
+ unsigned cl;
++ struct stevedore *stv = target->objstore->stevedore;
++
++ CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
++ AN(stv->alloc);
+
++ /*
++ * XXX: This should use the logic of STV_alloc() instead of just
++ * stv->alloc, to check if space is exhausted, nuke if necessary,
++ * etc. This is *not* safe; it risks running out of storage with
++ * no recovery. But STV_alloc() now assumes that allocs are only
++ * for busyobj->fetch_obj.
++ */
+ VTAILQ_FOREACH(st2, &src->store, list) {
+ cl = st2->len;
- st = STV_alloc(sp->wrk, cl);
++ st = stv->alloc(stv, cl);
+ XXXAN(st);
+ assert(st->space >= cl);
+ VTAILQ_INSERT_TAIL(&target->store, st, list);
+ st->len = cl;
+ target->len += cl;
+ memcpy(st->ptr, st2->ptr, cl);
+ }
+}
diff --cc bin/varnishd/storage/storage.h
index 70468be,c8c3689..79d75a3
--- a/bin/varnishd/storage/storage.h
+++ b/bin/varnishd/storage/storage.h
@@@ -40,7 -40,6 +40,7 @@@ struct lru
typedef void storage_init_f(struct stevedore *, int ac, char * const *av);
typedef void storage_open_f(const struct stevedore *);
typedef struct storage *storage_alloc_f(struct stevedore *, size_t size);
- typedef void storage_dup_f(const struct sess *sp, struct object *src, struct object *target);
++typedef void storage_dup_f(struct object *src, struct object *target);
typedef void storage_trim_f(struct storage *, size_t size);
typedef void storage_free_f(struct storage *);
typedef struct object *storage_allocobj_f(struct stevedore *,
@@@ -105,7 -103,3 +105,7 @@@ extern const struct stevedore smp_steve
#ifdef HAVE_LIBUMEM
extern const struct stevedore smu_stevedore;
#endif
+
+/* Default dup method */
- void STV_dup(const struct sess *sp, struct object *src, struct object *target);
- void default_dup(const struct sess *sp, struct object *src, struct object *target);
++void STV_dup(struct object *src, struct object *target);
++void default_dup(struct object *src, struct object *target);