[4.0] 405aad9 Split the oc->flags into those locked by oh->mtx, and those locked by lru->mtx.

Poul-Henning Kamp phk@FreeBSD.org
Tue Apr 1 15:09:53 CEST 2014


commit 405aad97af2482ec28df3e248055d0cd512a7fb0
Author: Poul-Henning Kamp <phk@FreeBSD.org>
Date:   Tue Mar 25 08:29:05 2014 +0000

    Split the oc->flags into those locked by oh->mtx, and those locked
    by lru->mtx.
    
    Spotted by: Martin
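
For readers skimming the diff, the essence of the change is a locking split:
oc->flags keeps the bits protected by the objhead mutex (oh->mtx), while the
new oc->exp_flags field holds the expiry/LRU bits (OC_EF_*) protected by
lru->mtx. Below is a minimal, self-contained sketch of that discipline; the
struct and function names are illustrative only, and plain pthread mutexes
stand in for Varnish's Lck_* wrappers:

    #include <stdint.h>
    #include <pthread.h>

    /* Hypothetical, pared-down objcore: only the two flag words matter here. */
    struct objcore_sketch {
        uint16_t  flags;      /* OC_F_*  bits: touch only while holding oh->mtx  */
        uint16_t  exp_flags;  /* OC_EF_* bits: touch only while holding lru->mtx */
    };

    #define OC_EF_DYING  (1<<7)   /* same value as in the cache.h hunk above */

    /*
     * Example: the expiry side marks an object dying.  Only the LRU mutex
     * is needed, because OC_EF_DYING now lives in exp_flags.
     */
    static void
    mark_dying(struct objcore_sketch *oc, pthread_mutex_t *lru_mtx)
    {
        pthread_mutex_lock(lru_mtx);
        oc->exp_flags |= OC_EF_DYING;
        pthread_mutex_unlock(lru_mtx);
    }
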

diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 01ec775..68c74ff 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -422,17 +422,21 @@ struct objcore {
 	struct objhead		*objhead;
 	struct busyobj		*busyobj;
 	double			timer_when;
-	unsigned		flags;
+
+	uint16_t		flags;
 #define OC_F_BUSY		(1<<1)
 #define OC_F_PASS		(1<<2)
-#define OC_F_OFFLRU		(1<<4)
 #define OC_F_PRIV		(1<<5)		/* Stevedore private flag */
-#define OC_F_DYING		(1<<7)
 #define OC_F_PRIVATE		(1<<8)
 #define OC_F_FAILED		(1<<9)
-#define OC_F_MOVE		(1<<10)
-#define OC_F_INSERT		(1<<11)
-#define OC_F_EXP		(1<<12)
+
+	uint16_t		exp_flags;
+#define OC_EF_OFFLRU		(1<<4)
+#define OC_EF_MOVE		(1<<10)
+#define OC_EF_INSERT		(1<<11)
+#define OC_EF_EXP		(1<<12)
+#define OC_EF_DYING		(1<<7)
+
 	unsigned		timer_idx;
 	VTAILQ_ENTRY(objcore)	list;
 	VTAILQ_ENTRY(objcore)	lru_list;
diff --git a/bin/varnishd/cache/cache_expire.c b/bin/varnishd/cache/cache_expire.c
index 4f21152..8bd0707 100644
--- a/bin/varnishd/cache/cache_expire.c
+++ b/bin/varnishd/cache/cache_expire.c
@@ -111,9 +111,9 @@ exp_mail_it(struct objcore *oc)
 {
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 
-	AN(oc->flags & OC_F_OFFLRU);
+	AN(oc->exp_flags & OC_EF_OFFLRU);
 	Lck_Lock(&exphdl->mtx);
-	if (oc->flags & OC_F_DYING)
+	if (oc->exp_flags & OC_EF_DYING)
 		VTAILQ_INSERT_HEAD(&exphdl->inbox, oc, lru_list);
 	else
 		VTAILQ_INSERT_TAIL(&exphdl->inbox, oc, lru_list);
@@ -135,8 +135,8 @@ EXP_Inject(struct objcore *oc, struct lru *lru, double when)
 
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 
-	AZ(oc->flags & (OC_F_OFFLRU | OC_F_INSERT | OC_F_MOVE));
-	AZ(oc->flags & OC_F_DYING);
+	AZ(oc->exp_flags & (OC_EF_OFFLRU | OC_EF_INSERT | OC_EF_MOVE));
+	AZ(oc->exp_flags & OC_EF_DYING);
 	// AN(oc->flags & OC_F_BUSY);
 
 	if (lru == NULL)
@@ -145,9 +145,9 @@ EXP_Inject(struct objcore *oc, struct lru *lru, double when)
 
 	Lck_Lock(&lru->mtx);
 	lru->n_objcore++;
-	oc->flags |= OC_F_OFFLRU | OC_F_INSERT | OC_F_EXP;
+	oc->exp_flags |= OC_EF_OFFLRU | OC_EF_INSERT | OC_EF_EXP;
 	if (when < 0)
-		oc->flags |= OC_F_MOVE;
+		oc->exp_flags |= OC_EF_MOVE;
 	else
 		oc->timer_when = when;
 	Lck_Unlock(&lru->mtx);
@@ -205,9 +205,9 @@ EXP_Touch(struct objcore *oc, double now)
 	if (Lck_Trylock(&lru->mtx))
 		return;
 
-	AN(oc->flags & OC_F_EXP);
+	AN(oc->exp_flags & OC_EF_EXP);
 
-	if (!(oc->flags & OC_F_OFFLRU)) {
+	if (!(oc->exp_flags & OC_EF_OFFLRU)) {
 		/* Can only touch it while it's actually on the LRU list */
 		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
 		VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
@@ -234,7 +234,7 @@ EXP_Rearm(struct object *o, double now, double ttl, double grace, double keep)
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 	assert(oc->refcnt > 0);
 
-	AN(oc->flags & OC_F_EXP);
+	AN(oc->exp_flags & OC_EF_EXP);
 
 	if (!isnan(ttl))
 		o->exp.ttl = now + ttl - o->exp.t_origin;
@@ -257,14 +257,14 @@ EXP_Rearm(struct object *o, double now, double ttl, double grace, double keep)
 	Lck_Lock(&lru->mtx);
 
 	if (!isnan(now) && when <= now)
-		oc->flags |= OC_F_DYING;
+		oc->exp_flags |= OC_EF_DYING;
 	else
-		oc->flags |= OC_F_MOVE;
+		oc->exp_flags |= OC_EF_MOVE;
 
-	if (oc->flags & OC_F_OFFLRU) {
+	if (oc->exp_flags & OC_EF_OFFLRU) {
 		oc = NULL;
 	} else {
-		oc->flags |= OC_F_OFFLRU;
+		oc->exp_flags |= OC_EF_OFFLRU;
 		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
 	}
 	Lck_Unlock(&lru->mtx);
@@ -294,8 +294,8 @@ EXP_NukeOne(struct busyobj *bo, struct lru *lru)
 		VSLb(bo->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
 		    oc, oc->flags, oc->refcnt);
 
-		AZ(oc->flags & OC_F_OFFLRU);
-		AZ(oc->flags & OC_F_DYING);
+		AZ(oc->exp_flags & OC_EF_OFFLRU);
+		AZ(oc->exp_flags & OC_EF_DYING);
 
 		/*
 		 * It wont release any space if we cannot release the last
@@ -309,7 +309,7 @@ EXP_NukeOne(struct busyobj *bo, struct lru *lru)
 		if (Lck_Trylock(&oh->mtx))
 			continue;
 		if (oc->refcnt == 1) {
-			oc->flags |= OC_F_DYING | OC_F_OFFLRU;
+			oc->exp_flags |= OC_EF_DYING | OC_EF_OFFLRU;
 			oc->refcnt++;
 			VSC_C_main->n_lru_nuked++; // XXX per lru ?
 			VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
@@ -434,20 +434,20 @@ exp_inbox(struct exp_priv *ep, struct objcore *oc, double now)
 
 	/* Evacuate our action-flags, and put it back on the LRU list */
 	Lck_Lock(&lru->mtx);
-	flags = oc->flags;
-	AN(flags & OC_F_OFFLRU);
-	oc->flags &= ~(OC_F_INSERT | OC_F_MOVE);
+	flags = oc->exp_flags;
+	AN(flags & OC_EF_OFFLRU);
+	oc->exp_flags &= ~(OC_EF_INSERT | OC_EF_MOVE);
 	oc->last_lru = now;
-	if (!(flags & OC_F_DYING)) {
+	if (!(flags & OC_EF_DYING)) {
 		VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
-		oc->flags &= ~OC_F_OFFLRU;
+		oc->exp_flags &= ~OC_EF_OFFLRU;
 	}
 	Lck_Unlock(&lru->mtx);
 
-	if (flags & OC_F_DYING) {
+	if (flags & OC_EF_DYING) {
 		VSLb(&ep->vsl, SLT_ExpKill, "EXP_Kill p=%p e=%.9f f=0x%x", oc,
 		    oc->timer_when, oc->flags);
-		if (!(flags & OC_F_INSERT)) {
+		if (!(flags & OC_EF_INSERT)) {
 			assert(oc->timer_idx != BINHEAP_NOIDX);
 			binheap_delete(ep->heap, oc->timer_idx);
 		}
@@ -456,7 +456,7 @@ exp_inbox(struct exp_priv *ep, struct objcore *oc, double now)
 		return;
 	}
 
-	if (flags & OC_F_MOVE) {
+	if (flags & OC_EF_MOVE) {
 		o = oc_getobj(&ep->wrk->stats, oc);
 		CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
 		oc->timer_when = exp_when(o);
@@ -472,11 +472,11 @@ exp_inbox(struct exp_priv *ep, struct objcore *oc, double now)
 	 * XXX: the next moment and rip them out again.
 	 */
 
-	if (flags & OC_F_INSERT) {
+	if (flags & OC_EF_INSERT) {
 		assert(oc->timer_idx == BINHEAP_NOIDX);
 		binheap_insert(exphdl->heap, oc);
 		assert(oc->timer_idx != BINHEAP_NOIDX);
-	} else if (flags & OC_F_MOVE) {
+	} else if (flags & OC_EF_MOVE) {
 		assert(oc->timer_idx != BINHEAP_NOIDX);
 		binheap_reorder(exphdl->heap, oc->timer_idx);
 		assert(oc->timer_idx != BINHEAP_NOIDX);
@@ -514,11 +514,11 @@ exp_expire(struct exp_priv *ep, double now)
 	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
 	Lck_Lock(&lru->mtx);
 	// AZ(oc->flags & OC_F_BUSY);
-	oc->flags |= OC_F_DYING;
-	if (oc->flags & OC_F_OFFLRU)
+	oc->exp_flags |= OC_EF_DYING;
+	if (oc->exp_flags & OC_EF_OFFLRU)
 		oc = NULL;
 	else {
-		oc->flags |= OC_F_OFFLRU;
+		oc->exp_flags |= OC_EF_OFFLRU;
 		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
 	}
 	Lck_Unlock(&lru->mtx);
diff --git a/bin/varnishd/cache/cache_hash.c b/bin/varnishd/cache/cache_hash.c
index d7263e1..6cb2de8 100644
--- a/bin/varnishd/cache/cache_hash.c
+++ b/bin/varnishd/cache/cache_hash.c
@@ -401,7 +401,9 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp,
 		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 		assert(oc->objhead == oh);
 
-		if (oc->flags & (OC_F_FAILED | OC_F_DYING))
+		if (oc->exp_flags & OC_EF_DYING)
+			continue;
+		if (oc->flags & OC_F_FAILED)
 			continue;
 
 		if (oc->flags & OC_F_BUSY) {
@@ -572,7 +574,7 @@ HSH_Purge(struct worker *wrk, struct objhead *oh, double ttl, double grace)
 	VTAILQ_FOREACH(oc, &oh->objcs, list) {
 		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 		assert(oc->objhead == oh);
-		if (oc->flags & (OC_F_BUSY|OC_F_DYING)) {
+		if (oc->flags & OC_F_BUSY) {
 			/*
 			 * We cannot purge busy objects here, because their
 			 * owners have special rights to them, and may nuke
@@ -581,6 +583,8 @@ HSH_Purge(struct worker *wrk, struct objhead *oh, double ttl, double grace)
 			 */
 			continue;
 		}
+		if (oc->exp_flags & OC_EF_DYING)
+			continue;
 		xxxassert(spc >= sizeof *ocp);
 		oc->refcnt++;
 		spc -= sizeof *ocp;
@@ -680,7 +684,7 @@ HSH_Unbusy(struct dstat *ds, struct objcore *oc)
 	if (!(oc->flags & OC_F_PRIVATE)) {
 		BAN_NewObjCore(oc);
 		EXP_Insert(oc);
-		AN(oc->flags & OC_F_EXP);
+		AN(oc->exp_flags & OC_EF_EXP);
 		AN(oc->ban);
 	}
 
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index d3d70d6..fd9783c 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -100,7 +100,7 @@ cnt_deliver(struct worker *wrk, struct req *req)
 	assert(req->obj->objcore->refcnt > 0);
 
 	req->t_resp = W_TIM_real(wrk);
-	if (req->obj->objcore->flags & OC_F_EXP)
+	if (req->obj->objcore->exp_flags & OC_EF_EXP)
 		EXP_Touch(req->obj->objcore, req->t_resp);
 
 	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
@@ -399,7 +399,8 @@ cnt_lookup(struct worker *wrk, struct req *req)
 	switch (wrk->handling) {
 	case VCL_RET_DELIVER:
 		if (boc != NULL) {
-			AZ(oc->flags & (OC_F_FAILED|OC_F_DYING|OC_F_PASS));
+			AZ(oc->flags & (OC_F_FAILED|OC_F_PASS));
+			AZ(oc->exp_flags & OC_EF_DYING);
 			AZ(oc->busyobj);
 			VBF_Fetch(wrk, req, boc, o, VBF_BACKGROUND);
 		} else {
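
A side effect visible in the cache_hash.c and cache_req_fsm.c hunks: tests
that used to OR OC_F_DYING together with oh->mtx-protected bits must now be
written as two separate checks, one per flag word. Restated outside diff
syntax (illustrative only):

    /* Before the split: one combined test on oc->flags */
    if (oc->flags & (OC_F_FAILED | OC_F_DYING))
        continue;

    /* After the split: OC_EF_DYING lives in oc->exp_flags */
    if (oc->exp_flags & OC_EF_DYING)
        continue;
    if (oc->flags & OC_F_FAILED)
        continue;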
