[master] 9f8dba3 Start the divorce that separates struct busyobj into a "busy objcore" (struct boc) and "the backend fetch context" (struct bereq).

Poul-Henning Kamp phk at FreeBSD.org
Thu Jan 28 12:49:18 CET 2016


commit 9f8dba345dc67c054172edb9f4c0a25942060987
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Tue Jan 19 10:26:15 2016 +0000

    Start the divorce that separates struct busyobj into a "busy objcore"
    (struct boc) and "the backend fetch context" (struct bereq).

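The mechanical effect of the split is that the cross-thread state (refcount,
lock, condition variable, fetch state, vary header, stevedore cookie) moves
into a struct boc embedded in struct busyobj, so call sites rename as follows
(a summary of the diff below, for orientation only):

    bo->refcount        ->  bo->boc->refcount
    bo->state           ->  bo->boc->state
    bo->vary            ->  bo->boc->vary
    bo->stevedore_priv  ->  bo->boc->stevedore_priv
    &bo->mtx            ->  &bo->boc->mtx
    &bo->cond           ->  &bo->boc->cond
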
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 4bdea99..4a3ce05 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -374,6 +374,33 @@ struct storeobj {
 	uintptr_t		priv2;
 };
 
+/* Busy Objcore structure --------------------------------------------
+ *
+ */
+
+/*
+ * The macro-states we expose outside the fetch code
+ */
+enum busyobj_state_e {
+	BOS_INVALID = 0,	/* don't touch (yet) */
+	BOS_REQ_DONE,		/* beresp.* can be examined */
+	BOS_STREAM,		/* beresp.* can be examined */
+	BOS_FINISHED,		/* object is complete */
+	BOS_FAILED,		/* something went wrong */
+};
+
+struct boc {
+	unsigned		magic;
+#define BOC_MAGIC		0x70c98476
+	unsigned		refcount;
+	struct lock		mtx;
+	pthread_cond_t		cond;
+	void			*stevedore_priv;
+	enum busyobj_state_e	state;
+	uint8_t			*vary;
+
+};
+
 /* Object core structure ---------------------------------------------
  * Objects have sideways references in the binary heap and the LRU list
  * and we want to avoid paging in a lot of objects just to move them up
@@ -425,17 +452,6 @@ struct objcore {
  * streaming delivery will make use of.
  */
 
-/*
- * The macro-states we expose outside the fetch code
- */
-enum busyobj_state_e {
-	BOS_INVALID = 0,	/* don't touch (yet) */
-	BOS_REQ_DONE,		/* beresp.* can be examined */
-	BOS_STREAM,		/* beresp.* can be examined */
-	BOS_FINISHED,		/* object is complete */
-	BOS_FAILED,		/* something went wrong */
-};
-
 enum director_state_e {
 	DIR_S_NULL = 0,
 	DIR_S_HDRS = 1,
@@ -445,25 +461,21 @@ enum director_state_e {
 struct busyobj {
 	unsigned		magic;
 #define BUSYOBJ_MAGIC		0x23b95567
-	struct lock		mtx;
-	pthread_cond_t		cond;
+
+	struct boc		boc[1];
+
 	char			*end;
 
 	/*
 	 * All fields from refcount and down are zeroed when the busyobj
 	 * is recycled.
 	 */
-	unsigned		refcount;
 	int			retries;
 	struct req		*req;
 	struct worker		*wrk;
 
-	uint8_t			*vary;
-
 	struct vfp_ctx		vfc[1];
 
-	enum busyobj_state_e	state;
-
 	struct ws		ws[1];
 	char			*ws_bo;
 	struct http		*bereq0;
@@ -500,8 +512,6 @@ struct busyobj {
 
 	struct vsl_log		vsl[1];
 
-	void			*stevedore_priv;
-
 	uint8_t			digest[DIGEST_LEN];
 	struct vrt_privs	privs[1];
 };
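
The new boc[1] member follows the same array-of-one idiom already used for
vfc[1], ws[1] and vsl[1] in struct busyobj: the sub-struct is embedded inline,
with no separate allocation, yet member access keeps pointer syntax. A minimal
stand-alone sketch of the idiom (illustrative names, not Varnish code):

    #include <stdio.h>

    struct inner {
            unsigned        refcount;
    };

    struct outer {
            struct inner    inner[1];   /* embedded, no separate allocation */
    };

    int
    main(void)
    {
            struct outer o = {{{ 0 }}};

            o.inner->refcount++;        /* pointer syntax on an inline member */
            printf("%u\n", o.inner->refcount);
            return (0);
    }
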
diff --git a/bin/varnishd/cache/cache_busyobj.c b/bin/varnishd/cache/cache_busyobj.c
index a0a5abb..3f63eb5 100644
--- a/bin/varnishd/cache/cache_busyobj.c
+++ b/bin/varnishd/cache/cache_busyobj.c
@@ -68,8 +68,9 @@ vbo_New(void)
 	XXXAN(bo);
 	bo->magic = BUSYOBJ_MAGIC;
 	bo->end = (char *)bo + sz;
-	Lck_New(&bo->mtx, lck_busyobj);
-	AZ(pthread_cond_init(&bo->cond, NULL));
+	INIT_OBJ(bo->boc, BOC_MAGIC);
+	Lck_New(&bo->boc->mtx, lck_busyobj);
+	AZ(pthread_cond_init(&bo->boc->cond, NULL));
 	return (bo);
 }
 
@@ -83,9 +84,9 @@ vbo_Free(struct busyobj **bop)
 	*bop = NULL;
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	AZ(bo->htc);
-	AZ(bo->refcount);
-	AZ(pthread_cond_destroy(&bo->cond));
-	Lck_Delete(&bo->mtx);
+	AZ(bo->boc->refcount);
+	AZ(pthread_cond_destroy(&bo->boc->cond));
+	Lck_Delete(&bo->boc->mtx);
 	MPL_Free(vbopool, bo);
 }
 
@@ -101,9 +102,9 @@ VBO_GetBusyObj(struct worker *wrk, const struct req *req)
 
 	bo = vbo_New();
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
-	AZ(bo->refcount);
+	AZ(bo->boc->refcount);
 
-	bo->refcount = 1;
+	bo->boc->refcount = 1;
 
 	p = (void*)(bo + 1);
 	p = (void*)PRNDUP(p);
@@ -171,14 +172,14 @@ VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
 		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 		CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
 		Lck_Lock(&oc->objhead->mtx);
-		assert(bo->refcount > 0);
-		r = --bo->refcount;
+		assert(bo->boc->refcount > 0);
+		r = --bo->boc->refcount;
 		Lck_Unlock(&oc->objhead->mtx);
 	} else {
-		Lck_Lock(&bo->mtx);
-		assert(bo->refcount > 0);
-		r = --bo->refcount;
-		Lck_Unlock(&bo->mtx);
+		Lck_Lock(&bo->boc->mtx);
+		assert(bo->boc->refcount > 0);
+		r = --bo->boc->refcount;
+		Lck_Unlock(&bo->boc->mtx);
 	}
 
 	if (r)
@@ -211,11 +212,16 @@ VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
 
 	VCL_Rel(&bo->vcl);
 
-	if (bo->vary != NULL)
-		free(bo->vary);
+	AZ(bo->boc->stevedore_priv);
+	AZ(bo->boc->refcount);
+	bo->boc->state = BOS_INVALID;
+	if (bo->boc->vary != NULL) {
+		free(bo->boc->vary);
+		bo->boc->vary = NULL;
+	}
 
-	memset(&bo->refcount, 0,
-	    sizeof *bo - offsetof(struct busyobj, refcount));
+	memset(&bo->retries, 0,
+	    sizeof *bo - offsetof(struct busyobj, retries));
 
 	vbo_Free(&bo);
 }
@@ -231,10 +237,10 @@ VBO_extend(struct worker *wrk, struct objcore *oc, struct busyobj *bo,
 	if (l == 0)
 		return;
 	assert(l > 0);
-	Lck_Lock(&bo->mtx);
+	Lck_Lock(&bo->boc->mtx);
 	ObjExtend(wrk, oc, l);
-	AZ(pthread_cond_broadcast(&bo->cond));
-	Lck_Unlock(&bo->mtx);
+	AZ(pthread_cond_broadcast(&bo->boc->cond));
+	Lck_Unlock(&bo->boc->mtx);
 }
 
 ssize_t
@@ -246,16 +252,16 @@ VBO_waitlen(struct worker *wrk, struct objcore *oc, struct busyobj *bo,
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	xxxassert(bo->fetch_objcore == oc);
-	Lck_Lock(&bo->mtx);
+	Lck_Lock(&bo->boc->mtx);
 	rv = ObjGetLen(wrk, oc);
 	while (1) {
-		assert(l <= rv || bo->state == BOS_FAILED);
-		if (rv > l || bo->state >= BOS_FINISHED)
+		assert(l <= rv || bo->boc->state == BOS_FAILED);
+		if (rv > l || bo->boc->state >= BOS_FINISHED)
 			break;
-		(void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
+		(void)Lck_CondWait(&bo->boc->cond, &bo->boc->mtx, 0);
 		rv = ObjGetLen(wrk, oc);
 	}
-	Lck_Unlock(&bo->mtx);
+	Lck_Unlock(&bo->boc->mtx);
 	return (rv);
 }
 
@@ -264,21 +270,21 @@ VBO_setstate(struct busyobj *bo, enum busyobj_state_e next)
 {
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	assert(bo->do_stream || next != BOS_STREAM);
-	assert(next > bo->state);
-	Lck_Lock(&bo->mtx);
-	bo->state = next;
-	AZ(pthread_cond_broadcast(&bo->cond));
-	Lck_Unlock(&bo->mtx);
+	assert(next > bo->boc->state);
+	Lck_Lock(&bo->boc->mtx);
+	bo->boc->state = next;
+	AZ(pthread_cond_broadcast(&bo->boc->cond));
+	Lck_Unlock(&bo->boc->mtx);
 }
 
 void
 VBO_waitstate(struct busyobj *bo, enum busyobj_state_e want)
 {
-	Lck_Lock(&bo->mtx);
+	Lck_Lock(&bo->boc->mtx);
 	while (1) {
-		if (bo->state >= want)
+		if (bo->boc->state >= want)
 			break;
-		(void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
+		(void)Lck_CondWait(&bo->boc->cond, &bo->boc->mtx, 0);
 	}
-	Lck_Unlock(&bo->mtx);
+	Lck_Unlock(&bo->boc->mtx);
 }
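
Two details above are worth spelling out. First, the recycle memset now starts
at retries instead of refcount: the fields the old memset wiped (refcount,
state, vary, stevedore_priv) now live inside the embedded boc, above the
zeroed region, so VBO_DerefBusyObj() resets them explicitly before the memset.
Second, VBO_setstate() and VBO_waitstate() form a monotonic state machine over
the boc lock: states only move forward through enum busyobj_state_e, setters
broadcast, and waiters loop until the state has reached at least the level
they want. A plain-pthreads sketch of that pattern (illustrative names, not
the Varnish API):

    #include <assert.h>
    #include <pthread.h>

    enum state {
            ST_INVALID = 0,
            ST_REQ_DONE,
            ST_STREAM,
            ST_FINISHED,
            ST_FAILED
    };

    struct machine {
            pthread_mutex_t mtx;
            pthread_cond_t  cond;
            enum state      state;
    };

    static void
    machine_set(struct machine *m, enum state next)
    {
            pthread_mutex_lock(&m->mtx);
            assert(next > m->state);        /* states only move forward */
            m->state = next;
            pthread_cond_broadcast(&m->cond);
            pthread_mutex_unlock(&m->mtx);
    }

    static void
    machine_wait(struct machine *m, enum state want)
    {
            pthread_mutex_lock(&m->mtx);
            while (m->state < want)
                    pthread_cond_wait(&m->cond, &m->mtx);
            pthread_mutex_unlock(&m->mtx);
    }
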
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index f781111..32b58ac 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -168,7 +168,7 @@ vbf_stp_mkbereq(const struct worker *wrk, struct busyobj *bo)
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
 
-	assert(bo->state == BOS_INVALID);
+	assert(bo->boc->state == BOS_INVALID);
 	AZ(bo->storage_hint);
 
 	HTTP_Setup(bo->bereq0, bo->ws, bo->vsl, SLT_BereqMethod);
@@ -221,7 +221,7 @@ vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
 	vfc = bo->vfc;
 	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);
 
-	assert(bo->state == BOS_REQ_DONE);
+	assert(bo->boc->state == BOS_REQ_DONE);
 
 	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));
 
@@ -281,7 +281,7 @@ vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
 
 	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
 
-	assert(bo->state <= BOS_REQ_DONE);
+	assert(bo->boc->state <= BOS_REQ_DONE);
 
 	AZ(bo->htc);
 	i = VDI_GetHdr(wrk, bo);
@@ -451,7 +451,7 @@ vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
 		return (F_STP_ERROR);
 	}
 
-	assert(bo->state == BOS_REQ_DONE);
+	assert(bo->boc->state == BOS_REQ_DONE);
 
 	if (bo->do_esi)
 		bo->do_stream = 0;
@@ -642,9 +642,9 @@ vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
 	if (bo->htc->body_status != BS_NONE)
 		AZ(VDI_GetBody(bo->wrk, bo));
 
-	assert(bo->refcount >= 1);
+	assert(bo->boc->refcount >= 1);
 
-	assert(bo->state == BOS_REQ_DONE);
+	assert(bo->boc->state == BOS_REQ_DONE);
 
 	if (bo->do_stream) {
 		HSH_Unbusy(wrk, bo->fetch_objcore);
@@ -663,7 +663,7 @@ vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
 	if (bo->vfc->failed) {
 		VDI_Finish(bo->wrk, bo);
 		if (!bo->do_stream) {
-			assert(bo->state < BOS_STREAM);
+			assert(bo->boc->state < BOS_STREAM);
 			// XXX: doclose = ?
 			return (F_STP_ERROR);
 		} else {
@@ -672,9 +672,9 @@ vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
 	}
 
 	if (bo->do_stream)
-		assert(bo->state == BOS_STREAM);
+		assert(bo->boc->state == BOS_STREAM);
 	else {
-		assert(bo->state == BOS_REQ_DONE);
+		assert(bo->boc->state == BOS_REQ_DONE);
 		HSH_Unbusy(wrk, bo->fetch_objcore);
 	}
 
@@ -872,7 +872,7 @@ vbf_stp_fail(struct worker *wrk, struct busyobj *bo)
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);
 
-	assert(bo->state < BOS_FINISHED);
+	assert(bo->boc->state < BOS_FINISHED);
 	HSH_Fail(bo->fetch_objcore);
 	if (bo->fetch_objcore->exp_flags & OC_EF_EXP) {
 		/* Already unbusied - expire it */
@@ -926,7 +926,7 @@ vbf_fetch_thread(struct worker *wrk, void *priv)
 
 	while (stp != F_STP_DONE) {
 		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
-		assert(bo->refcount >= 1);
+		assert(bo->boc->refcount >= 1);
 		switch(stp) {
 #define FETCH_STEP(l, U, arg)						\
 		case F_STP_##U:						\
@@ -944,7 +944,7 @@ vbf_fetch_thread(struct worker *wrk, void *priv)
 	http_Teardown(bo->bereq);
 	http_Teardown(bo->beresp);
 
-	if (bo->state == BOS_FINISHED) {
+	if (bo->boc->state == BOS_FINISHED) {
 		AZ(bo->fetch_objcore->flags & OC_F_FAILED);
 		HSH_Complete(bo->fetch_objcore);
 		VSLb(bo->vsl, SLT_Length, "%ju",
@@ -992,7 +992,7 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
 	THR_SetBusyobj(bo);
 
 	bo_fetch = bo;
-	bo->refcount = 2;
+	bo->boc->refcount = 2;
 
 	oc->busyobj = bo;
 
@@ -1001,7 +1001,7 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
 	if (mode == VBF_PASS)
 		bo->do_pass = 1;
 
-	bo->vary = req->vary_b;
+	bo->boc->vary = req->vary_b;
 	req->vary_b = NULL;
 
 	if (mode != VBF_BACKGROUND)
@@ -1034,7 +1034,7 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
 			VBO_waitstate(bo, BOS_REQ_DONE);
 		} else {
 			VBO_waitstate(bo, BOS_STREAM);
-			if (bo->state == BOS_FAILED) {
+			if (bo->boc->state == BOS_FAILED) {
 				AN((oc->flags & OC_F_FAILED));
 			} else {
 				AZ(bo->fetch_objcore->flags & OC_F_BUSY);
diff --git a/bin/varnishd/cache/cache_hash.c b/bin/varnishd/cache/cache_hash.c
index 12fd0d6..a670143 100644
--- a/bin/varnishd/cache/cache_hash.c
+++ b/bin/varnishd/cache/cache_hash.c
@@ -397,8 +397,8 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp,
 				continue;
 
 			if (oc->busyobj != NULL &&
-			    oc->busyobj->vary != NULL &&
-			    !VRY_Match(req, oc->busyobj->vary))
+			    oc->busyobj->boc->vary != NULL &&
+			    !VRY_Match(req, oc->busyobj->boc->vary))
 				continue;
 
 			busy_found = 1;
@@ -720,7 +720,7 @@ HSH_RefBusy(const struct objcore *oc)
 	bo = oc->busyobj;
 	CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);
 	if (bo != NULL)
-		bo->refcount++;
+		bo->boc->refcount++;
 	Lck_Unlock(&oh->mtx);
 	return (bo);
 }
diff --git a/bin/varnishd/cache/cache_panic.c b/bin/varnishd/cache/cache_panic.c
index 0613604..6dbc3c0 100644
--- a/bin/varnishd/cache/cache_panic.c
+++ b/bin/varnishd/cache/cache_panic.c
@@ -272,10 +272,10 @@ pan_busyobj(struct vsb *vsb, const struct busyobj *bo)
 	VSB_printf(vsb, "busyobj = %p {\n", bo);
 	VSB_indent(vsb, 2);
 	pan_ws(vsb, bo->ws);
-	VSB_printf(vsb, "refcnt = %u,\n", bo->refcount);
+	VSB_printf(vsb, "refcnt = %u,\n", bo->boc->refcount);
 	VSB_printf(vsb, "retries = %d, ", bo->retries);
 	VSB_printf(vsb, "failed = %d, ", bo->vfc->failed);
-	VSB_printf(vsb, "state = %d,\n", (int)bo->state);
+	VSB_printf(vsb, "state = %d,\n", (int)bo->boc->state);
 	VSB_printf(vsb, "flags = {");
 	p = "";
 	/*lint -save -esym(438,p) */
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index fda7912..0c793ef 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -199,7 +199,7 @@ cnt_deliver(struct worker *wrk, struct req *req)
 	/* Grab a ref to the bo if there is one, and hand it down */
 	bo = HSH_RefBusy(req->objcore);
 	if (bo != NULL) {
-		if (req->esi_level == 0 && bo->state == BOS_FINISHED) {
+		if (req->esi_level == 0 && bo->boc->state == BOS_FINISHED) {
 			VBO_DerefBusyObj(wrk, &bo);
 		} else if (!bo->do_stream) {
 			VBO_waitstate(bo, BOS_FINISHED);
diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c
index 3c253a5..d2286ba 100644
--- a/bin/varnishd/storage/storage_simple.c
+++ b/bin/varnishd/storage/storage_simple.c
@@ -258,16 +258,16 @@ sml_iterator(struct worker *wrk, struct objcore *oc,
 	while (1) {
 		ol = len;
 		nl = VBO_waitlen(wrk, oc, bo, ol);
-		if (bo->state == BOS_FAILED) {
+		if (bo->boc->state == BOS_FAILED) {
 			ret = -1;
 			break;
 		}
 		if (nl == ol) {
-			if (bo->state == BOS_FINISHED)
+			if (bo->boc->state == BOS_FINISHED)
 				break;
 			continue;
 		}
-		Lck_Lock(&bo->mtx);
+		Lck_Lock(&bo->boc->mtx);
 		AZ(VTAILQ_EMPTY(&obj->list));
 		if (checkpoint == NULL) {
 			st = VTAILQ_FIRST(&obj->list);
@@ -300,8 +300,8 @@ sml_iterator(struct worker *wrk, struct objcore *oc,
 		st = VTAILQ_NEXT(st, list);
 		if (st != NULL && st->len == 0)
 			st = NULL;
-		Lck_Unlock(&bo->mtx);
-		assert(l > 0 || bo->state == BOS_FINISHED);
+		Lck_Unlock(&bo->boc->mtx);
+		assert(l > 0 || bo->boc->state == BOS_FINISHED);
 		if (func(priv, st != NULL ? 0 : 1, p, l)) {
 			ret = -1;
 			break;
@@ -375,9 +375,9 @@ sml_getspace(struct worker *wrk, struct objcore *oc, ssize_t *sz,
 
 	if (oc->busyobj != NULL) {
 		CHECK_OBJ_NOTNULL(oc->busyobj, BUSYOBJ_MAGIC);
-		Lck_Lock(&oc->busyobj->mtx);
+		Lck_Lock(&oc->busyobj->boc->mtx);
 		VTAILQ_INSERT_TAIL(&o->list, st, list);
-		Lck_Unlock(&oc->busyobj->mtx);
+		Lck_Unlock(&oc->busyobj->boc->mtx);
 	} else {
 		AN(oc->flags & (OC_F_PRIVATE));
 		VTAILQ_INSERT_TAIL(&o->list, st, list);
@@ -443,9 +443,9 @@ sml_trimstore(struct worker *wrk, struct objcore *oc)
 
 	if (st->len == 0) {
 		if (oc->busyobj != NULL) {
-			Lck_Lock(&oc->busyobj->mtx);
+			Lck_Lock(&oc->busyobj->boc->mtx);
 			VTAILQ_REMOVE(&o->list, st, list);
-			Lck_Unlock(&oc->busyobj->mtx);
+			Lck_Unlock(&oc->busyobj->boc->mtx);
 		} else {
 			VTAILQ_REMOVE(&o->list, st, list);
 		}
@@ -468,10 +468,10 @@ sml_trimstore(struct worker *wrk, struct objcore *oc)
 	memcpy(st1->ptr, st->ptr, st->len);
 	st1->len = st->len;
 	if (oc->busyobj != NULL) {
-		Lck_Lock(&oc->busyobj->mtx);
+		Lck_Lock(&oc->busyobj->boc->mtx);
 		VTAILQ_REMOVE(&o->list, st, list);
 		VTAILQ_INSERT_TAIL(&o->list, st1, list);
-		Lck_Unlock(&oc->busyobj->mtx);
+		Lck_Unlock(&oc->busyobj->boc->mtx);
 	} else {
 		VTAILQ_REMOVE(&o->list, st, list);
 		VTAILQ_INSERT_TAIL(&o->list, st1, list);
@@ -480,8 +480,8 @@ sml_trimstore(struct worker *wrk, struct objcore *oc)
 		sml_stv_free(stv, st);
 	} else {
 		/* sml_stable frees this */
-		AZ(oc->busyobj->stevedore_priv);
-		oc->busyobj->stevedore_priv = st;
+		AZ(oc->busyobj->boc->stevedore_priv);
+		oc->busyobj->boc->stevedore_priv = st;
 	}
 }
 
@@ -495,10 +495,10 @@ sml_stable(struct worker *wrk, struct objcore *oc, struct busyobj *bo)
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 
-	if (bo->stevedore_priv == NULL)
+	if (bo->boc->stevedore_priv == NULL)
 		return;
-	CAST_OBJ_NOTNULL(st, bo->stevedore_priv, STORAGE_MAGIC);
-	bo->stevedore_priv = 0;
+	CAST_OBJ_NOTNULL(st, bo->boc->stevedore_priv, STORAGE_MAGIC);
+	bo->boc->stevedore_priv = 0;
 	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
 	stv = oc->stobj->stevedore;
 	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
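
Last, the stevedore_priv cookie (now in the boc) carries a small deferred-free
handoff: sml_trimstore() may not release the replaced storage segment on the
spot, so it parks it in boc->stevedore_priv and sml_stable() frees it once the
object settles. Generically (illustrative names, not the Varnish API):

    #include <stdlib.h>

    struct owner {
            void    *parked;    /* resource awaiting its safe-to-free point */
    };

    static void
    owner_park(struct owner *o, void *resource)
    {
            if (o->parked != NULL)  /* at most one pending, as AZ() asserts */
                    abort();
            o->parked = resource;
    }

    static void
    owner_settle(struct owner *o)
    {
            free(o->parked);        /* free(NULL) is a no-op */
            o->parked = NULL;
    }
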


