[master] 8b4036e Further tightening up of obj/stv/lru

Poul-Henning Kamp phk at FreeBSD.org
Fri Feb 5 13:31:45 CET 2016


commit 8b4036e951bb6b7c9fceb4175f452dba6c9d9e0f
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Fri Feb 5 12:30:40 2016 +0000

    Further tightening up of obj/stv/lru
    
    All objcores, including those for synth and req.body, are now born
    with a boc, which must of course be properly disposed of.
    
    More assertive LRU code.
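    In short: ObjNew() and HSH_Private() lose their wantboc argument, every
    new objcore now carries a boc, and callers such as cnt_synth() and
    VRB_Cache() release it explicitly with HSH_DerefBusy() (plus
    HSH_DerefObjCore() on the error paths).  LRU_Free() now takes a
    struct lru ** and clears the caller's pointer before freeing.  Below is
    a minimal, self-contained sketch of that pointer-clearing destructor
    idiom; the names (toy_lru, toy_lru_free) are hypothetical stand-ins,
    not the actual Varnish structures or API.

	/*
	 * Sketch of the destructor idiom adopted by LRU_Free() in this
	 * commit: take a pointer-to-pointer, assert on obvious misuse,
	 * free the object and clear the caller's reference so stale
	 * pointers fail loudly instead of silently reading freed memory.
	 */
	#include <assert.h>
	#include <stdlib.h>

	struct toy_lru {
		unsigned	magic;
	#define TOY_LRU_MAGIC	0x3fec7bb0
		int		n_objects;	/* must be 0 before freeing */
	};

	static struct toy_lru *
	toy_lru_new(void)
	{
		struct toy_lru *lru = calloc(1, sizeof *lru);

		assert(lru != NULL);
		lru->magic = TOY_LRU_MAGIC;
		return (lru);
	}

	static void
	toy_lru_free(struct toy_lru **pp)
	{
		struct toy_lru *lru;

		assert(pp != NULL);
		lru = *pp;
		*pp = NULL;			/* caller's pointer dies here */
		assert(lru != NULL);
		assert(lru->magic == TOY_LRU_MAGIC);
		assert(lru->n_objects == 0);	/* list must already be empty */
		lru->magic = 0;
		free(lru);
	}

	int
	main(void)
	{
		struct toy_lru *lru = toy_lru_new();

		toy_lru_free(&lru);
		assert(lru == NULL);	/* later use is an obvious NULL deref */
		return (0);
	}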

diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 24c85cb..5e9d30d 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -837,7 +837,7 @@ void *MPL_Get(struct mempool *mpl, unsigned *size);
 void MPL_Free(struct mempool *mpl, void *item);
 
 /* cache_obj.c */
-struct objcore * ObjNew(struct worker *, int wantboc);
+struct objcore * ObjNew(struct worker *);
 typedef int objiterate_f(void *priv, int flush, const void *ptr, ssize_t len);
 int ObjIterate(struct worker *, struct objcore *,
     void *priv, objiterate_f *func);
diff --git a/bin/varnishd/cache/cache_hash.c b/bin/varnishd/cache/cache_hash.c
index 6ad54e9..1a28fbf 100644
--- a/bin/varnishd/cache/cache_hash.c
+++ b/bin/varnishd/cache/cache_hash.c
@@ -90,7 +90,7 @@ hsh_prealloc(struct worker *wrk)
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
 
 	if (wrk->nobjcore == NULL) {
-		wrk->nobjcore = ObjNew(wrk, 1);
+		wrk->nobjcore = ObjNew(wrk);
 		wrk->nobjcore->flags |= OC_F_BUSY | OC_F_INCOMPLETE;
 	}
 	CHECK_OBJ_NOTNULL(wrk->nobjcore, OBJCORE_MAGIC);
@@ -108,13 +108,13 @@ hsh_prealloc(struct worker *wrk)
 /*---------------------------------------------------------------------*/
 
 struct objcore *
-HSH_Private(struct worker *wrk, int wantboc)
+HSH_Private(struct worker *wrk)
 {
 	struct objcore *oc;
 
 	CHECK_OBJ_NOTNULL(private_oh, OBJHEAD_MAGIC);
 
-	oc = ObjNew(wrk, wantboc);
+	oc = ObjNew(wrk);
 	AN(oc);
 	oc->refcnt = 1;
 	oc->objhead = private_oh;
diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c
index 0ae34ed..9f0bdf2 100644
--- a/bin/varnishd/cache/cache_obj.c
+++ b/bin/varnishd/cache/cache_obj.c
@@ -65,7 +65,7 @@ obj_getmethods(const struct objcore *oc)
  */
 
 struct objcore *
-ObjNew(struct worker *wrk, int wantboc)
+ObjNew(struct worker *wrk)
 {
 	struct objcore *oc;
 
@@ -75,13 +75,11 @@ ObjNew(struct worker *wrk, int wantboc)
 	AN(oc);
 	wrk->stats->n_objectcore++;
 	oc->last_lru = NAN;
-	if (wantboc) {
-		ALLOC_OBJ(oc->boc, BOC_MAGIC);
-		AN(oc->boc);
-		Lck_New(&oc->boc->mtx, lck_busyobj);
-		AZ(pthread_cond_init(&oc->boc->cond, NULL));
-		oc->boc->refcount = 1;
-	}
+	ALLOC_OBJ(oc->boc, BOC_MAGIC);
+	AN(oc->boc);
+	Lck_New(&oc->boc->mtx, lck_busyobj);
+	AZ(pthread_cond_init(&oc->boc->cond, NULL));
+	oc->boc->refcount = 1;
 	return (oc);
 }
 
@@ -117,6 +115,7 @@ ObjGetSpace(struct worker *wrk, struct objcore *oc, ssize_t *sz, uint8_t **ptr)
 	const struct obj_methods *om = obj_getmethods(oc);
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
 	AN(sz);
 	AN(ptr);
 	assert(*sz > 0);
@@ -139,7 +138,7 @@ ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l)
 	uint64_t len;
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	CHECK_OBJ_ORNULL(oc->boc, BOC_MAGIC);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
 	assert(l > 0);
 
 	AZ(ObjGetU64(wrk, oc, OA_LEN, &len));
@@ -369,6 +368,7 @@ ObjSetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
 	const struct obj_methods *om = obj_getmethods(oc);
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
 
 	AN(om->objsetattr);
 	assert((int)attr < 16);
@@ -451,6 +451,7 @@ ObjCopyAttr(struct worker *wrk, struct objcore *oc, struct objcore *ocs,
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
 	CHECK_OBJ_NOTNULL(ocs, OBJCORE_MAGIC);
 
 	vps = ObjGetAttr(wrk, ocs, attr, &l);
diff --git a/bin/varnishd/cache/cache_panic.c b/bin/varnishd/cache/cache_panic.c
index d670326..79566af 100644
--- a/bin/varnishd/cache/cache_panic.c
+++ b/bin/varnishd/cache/cache_panic.c
@@ -418,6 +418,8 @@ pan_req(struct vsb *vsb, const struct req *req)
 
 	VCL_Panic(vsb, req->vcl);
 
+	if (req->body_oc != NULL)
+		pan_objcore(vsb, "BODY", req->body_oc);
 	if (req->objcore != NULL)
 		pan_objcore(vsb, "REQ", req->objcore);
 
diff --git a/bin/varnishd/cache/cache_req_body.c b/bin/varnishd/cache/cache_req_body.c
index c3fd142..90145e4 100644
--- a/bin/varnishd/cache/cache_req_body.c
+++ b/bin/varnishd/cache/cache_req_body.c
@@ -216,16 +216,19 @@ VRB_Cache(struct req *req, ssize_t maxsize)
 		return (-1);
 	}
 
-	req->body_oc = HSH_Private(req->wrk, 0);
+	vfc->http = req->http;
+
+	req->body_oc = HSH_Private(req->wrk);
 	AN(req->body_oc);
 	XXXAN(STV_NewObject(req->wrk, req->body_oc, TRANSIENT_STORAGE, 8));
 
-	vfc->http = req->http;
 	vfc->oc = req->body_oc;
 	V1F_Setup_Fetch(vfc, req->htc);
 
 	if (VFP_Open(vfc) < 0) {
 		req->req_body_status = REQ_BODY_FAIL;
+		HSH_DerefBusy(req->wrk, req->body_oc);
+		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc));
 		return (-1);
 	}
 
@@ -239,6 +242,8 @@ VRB_Cache(struct req *req, ssize_t maxsize)
 		if (req->req_bodybytes > maxsize) {
 			req->req_body_status = REQ_BODY_FAIL;
 			(void)VFP_Error(vfc, "Request body too big to cache");
+			HSH_DerefBusy(req->wrk, req->body_oc);
+			AZ(HSH_DerefObjCore(req->wrk, &req->body_oc));
 			VFP_Close(vfc);
 			return(-1);
 		}
@@ -286,5 +291,6 @@ VRB_Cache(struct req *req, ssize_t maxsize)
 		req->req_body_status = REQ_BODY_FAIL;
 	}
 	VSLb_ts_req(req, "ReqBody", VTIM_real());
+	HSH_DerefBusy(req->wrk, req->body_oc);
 	return (vfps == VFP_END ? req->req_bodybytes : -1);
 }
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index feb41d6..e468aab 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -191,8 +191,7 @@ cnt_synth(struct worker *wrk, struct req *req)
 	}
 	assert(wrk->handling == VCL_RET_DELIVER);
 
-	req->objcore = HSH_Private(wrk, 0);
-	AZ(req->objcore->boc);
+	req->objcore = HSH_Private(wrk);
 	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
 	szl = -1;
 	if (STV_NewObject(wrk, req->objcore, TRANSIENT_STORAGE, 1024)) {
@@ -208,6 +207,7 @@ cnt_synth(struct worker *wrk, struct req *req)
 		}
 	}
 
+	HSH_DerefBusy(wrk, req->objcore);
 	VSB_delete(synth_body);
 
 	if (szl < 0) {
@@ -551,7 +551,7 @@ cnt_pass(struct worker *wrk, struct req *req)
 		break;
 	case VCL_RET_FETCH:
 		wrk->stats->s_pass++;
-		req->objcore = HSH_Private(wrk, 1);
+		req->objcore = HSH_Private(wrk);
 		CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
 		VBF_Fetch(wrk, req, req->objcore, NULL, VBF_PASS);
 		req->req_step = R_STP_FETCH;
diff --git a/bin/varnishd/hash/hash_slinger.h b/bin/varnishd/hash/hash_slinger.h
index e9752be..54becbb 100644
--- a/bin/varnishd/hash/hash_slinger.h
+++ b/bin/varnishd/hash/hash_slinger.h
@@ -73,7 +73,7 @@ void HSH_Purge(struct worker *, struct objhead *, double ttl, double grace,
 void HSH_config(const char *h_arg);
 struct boc *HSH_RefBusy(const struct objcore *);
 void HSH_DerefBusy(struct worker *wrk, struct objcore *);
-struct objcore *HSH_Private(struct worker *wrk, int wantboc);
+struct objcore *HSH_Private(struct worker *wrk);
 void HSH_Abandon(struct objcore *oc);
 
 #ifdef VARNISH_CACHE_CHILD
diff --git a/bin/varnishd/storage/storage.h b/bin/varnishd/storage/storage.h
index d00db91..32b7e47 100644
--- a/bin/varnishd/storage/storage.h
+++ b/bin/varnishd/storage/storage.h
@@ -121,7 +121,7 @@ uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity,
 
 /*--------------------------------------------------------------------*/
 struct lru *LRU_Alloc(void);
-void LRU_Free(struct lru *);
+void LRU_Free(struct lru **);
 void LRU_Add(struct objcore *, double now);
 void LRU_Remove(struct objcore *);
 int LRU_NukeOne(struct worker *, struct lru *);
diff --git a/bin/varnishd/storage/storage_lru.c b/bin/varnishd/storage/storage_lru.c
index 4e7ae2e..54b8619 100644
--- a/bin/varnishd/storage/storage_lru.c
+++ b/bin/varnishd/storage/storage_lru.c
@@ -71,10 +71,17 @@ LRU_Alloc(void)
 }
 
 void
-LRU_Free(struct lru *lru)
+LRU_Free(struct lru **pp)
 {
+	struct lru *lru;
+
+	AN(pp);
+	lru = *pp;
+	*pp = NULL;
 	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
+	Lck_Lock(&lru->mtx);
 	AN(VTAILQ_EMPTY(&lru->lru_head));
+	Lck_Unlock(&lru->mtx);
 	Lck_Delete(&lru->mtx);
 	FREE_OBJ(lru);
 }
@@ -85,12 +92,15 @@ LRU_Add(struct objcore *oc, double now)
 	struct lru *lru;
 
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	AZ(oc->boc);
+	AN(isnan(oc->last_lru));
 	AZ(isnan(now));
 	lru = lru_get(oc);
 	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
 	Lck_Lock(&lru->mtx);
 	VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
 	oc->last_lru = now;
+	AZ(isnan(oc->last_lru));
 	Lck_Unlock(&lru->mtx);
 }
 
@@ -100,13 +110,13 @@ LRU_Remove(struct objcore *oc)
 	struct lru *lru;
 
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	AZ(oc->boc);
 	lru = lru_get(oc);
 	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
 	Lck_Lock(&lru->mtx);
-	if (!isnan(oc->last_lru)) {
-		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
-		oc->last_lru = NAN;
-	}
+	AZ(isnan(oc->last_lru));
+	VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
+	oc->last_lru = NAN;
 	Lck_Unlock(&lru->mtx);
 }
 
@@ -172,7 +182,7 @@ LRU_NukeOne(struct worker *wrk, struct lru *lru)
 		if (ObjSnipe(wrk, oc)) {
 			VSC_C_main->n_lru_nuked++; // XXX per lru ?
 			VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
-			oc->last_lru = NAN;
+			VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
 			break;
 		}
 	}
diff --git a/bin/varnishd/storage/storage_persistent_silo.c b/bin/varnishd/storage/storage_persistent_silo.c
index 60ec88e..df159e1 100644
--- a/bin/varnishd/storage/storage_persistent_silo.c
+++ b/bin/varnishd/storage/storage_persistent_silo.c
@@ -51,7 +51,7 @@
 #include "storage/storage_persistent.h"
 
 /*
- * We use the low bit to mark objects still needing fixup
+ * We use the top bit to mark objects still needing fixup
  * In theory this may need to be platform dependent
  */
 
@@ -106,7 +106,7 @@ smp_save_segs(struct smp_sc *sc)
 		if (sg == sc->cur_seg)
 			continue;
 		VTAILQ_REMOVE(&sc->segments, sg, list);
-		LRU_Free(sg->lru);
+		LRU_Free(&sg->lru);
 		FREE_OBJ(sg);
 	}
 	smp_save_seg(sc, &sc->seg1);
@@ -159,7 +159,7 @@ smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
 	for (;no > 0; so++,no--) {
 		if (EXP_When(&so->exp) < t_now)
 			continue;
-		oc = ObjNew(wrk, 0);
+		oc = ObjNew(wrk);
 		oc->flags &= ~OC_F_BUSY;
 		oc->stobj->stevedore = sc->parent;
 		smp_init_oc(oc, sg, no);
@@ -169,6 +169,9 @@ smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
 		oc->exp = so->exp;
 		sg->nobj++;
 		EXP_Inject(wrk, oc);
+		AN(isnan(oc->last_lru));
+		HSH_DerefBusy(wrk, oc);	// XXX Keep it an stream resurrection?
+		AZ(isnan(oc->last_lru));
 	}
 	Pool_Sumstat(wrk);
 	sg->flags |= SMP_SEG_LOADED;
@@ -272,7 +275,7 @@ smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
 		assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
 		assert(sg->p.offset < sc->mediasize);
 		sc->free_offset = sg->p.offset;
-		LRU_Free(sg->lru);
+		LRU_Free(&sg->lru);
 		FREE_OBJ(sg);
 		return;
 	}
@@ -507,6 +510,8 @@ smp_oc_objfree(struct worker *wrk, struct objcore *oc)
 		sg->nfixed--;
 		wrk->stats->n_object--;
 	}
+	AZ(isnan(oc->last_lru));
+	LRU_Remove(oc);
 
 	Lck_Unlock(&sg->sc->mtx);
 	memset(oc->stobj, 0, sizeof oc->stobj);
diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c
index 84cf755..f27bc95 100644
--- a/bin/varnishd/storage/storage_simple.c
+++ b/bin/varnishd/storage/storage_simple.c
@@ -206,7 +206,8 @@ sml_objfree(struct worker *wrk, struct objcore *oc)
 	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);
 	o->magic = 0;
 
-	LRU_Remove(oc);
+	if (oc->boc == NULL)
+		LRU_Remove(oc);
 
 	sml_stv_free(oc->stobj->stevedore, o->objstore);
 


