[master] 093c3ad55 hash: Only grant OC_F_BUSY to lookup-able objects

Dridi Boukelmoune dridi.boukelmoune at gmail.com
Wed Aug 27 15:23:05 UTC 2025


commit 093c3ad55a39a5674ab6850e29afbd4ebe95596e
Author: Dridi Boukelmoune <dridi.boukelmoune at gmail.com>
Date:   Mon Mar 4 18:31:10 2024 +0100

    hash: Only grant OC_F_BUSY to lookup-able objects
    
    The OC_F_BUSY flag is used to decide whether to disembark a request into
    an objhead's waiting list. Therefore, it makes sense to only give this
    flag to objcores for which the outcome may be a shareable object.
    
    This excludes synthetic and private (req.body and passed req) objcores,
    and objects inserted with HSH_Insert(). Inserted objects are never busy
    since they arrive ready to use.
    
    Pass transactions don't need this flag either. The OC_F_BUSY flag was
    somewhat conflated with struct busyobj in the fetch state machine. We
    don't need OC_F_BUSY to convey that a fetch task is coordinated with a
    req task, oc->boc is already doing that. The OC_F_BUSY|OC_F_PRIVATE
    combination should be considered an oxymoron, since OC_F_BUSY drives
    waiting-list activity and OC_F_PRIVATE is for objects that are not
    shareable by definition.

diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index 4ea54d9f9..c707a22b6 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -934,7 +934,8 @@ vbf_stp_error(struct worker *wrk, struct busyobj *bo)
 	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
 	oc = bo->fetch_objcore;
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	AN(oc->flags & OC_F_BUSY);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
+	assert(oc->boc->state < BOS_STREAM);
 	assert(bo->director_state == DIR_S_NULL);
 
 	if (wrk->vpi->handling != VCL_RET_ERROR)
@@ -1168,7 +1169,6 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
 	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
-	AN(oc->flags & OC_F_BUSY);
 	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);
 
 	bo = VBO_GetBusyObj(wrk, req);
@@ -1177,6 +1177,7 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
 
 	boc = HSH_RefBoc(oc);
 	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
+	assert(boc->state < BOS_STREAM);
 	boc->transit_buffer = cache_param->transit_buffer;
 
 	switch (mode) {
diff --git a/bin/varnishd/cache/cache_hash.c b/bin/varnishd/cache/cache_hash.c
index 5d133a88f..cd7c85c74 100644
--- a/bin/varnishd/cache/cache_hash.c
+++ b/bin/varnishd/cache/cache_hash.c
@@ -316,7 +316,7 @@ HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc,
 	AN(digest);
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 	AN(ban);
-	AN(oc->flags & OC_F_BUSY);
+	AZ(oc->flags & OC_F_BUSY);
 	AZ(oc->flags & OC_F_PRIVATE);
 	assert(oc->refcnt == 1);
 	INIT_OBJ(&rush, RUSH_MAGIC);
@@ -344,7 +344,6 @@ HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc,
 	Lck_Lock(&oh->mtx);
 	VTAILQ_REMOVE(&oh->objcs, oc, hsh_list);
 	VTAILQ_INSERT_HEAD(&oh->objcs, oc, hsh_list);
-	oc->flags &= ~OC_F_BUSY;
 	if (!VTAILQ_EMPTY(&oh->waitinglist))
 		hsh_rush1(wrk, oh, &rush, HSH_RUSH_POLICY);
 	Lck_Unlock(&oh->mtx);
@@ -370,7 +369,8 @@ hsh_insert_busyobj(const struct worker *wrk, struct objhead *oh)
 	wrk->wpriv->nobjcore = NULL;
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 
-	AN(oc->flags & OC_F_BUSY);
+	AZ(oc->flags & OC_F_BUSY);
+	oc->flags |= OC_F_BUSY;
 	oc->refcnt = 1;		/* Owned by busyobj */
 	oc->objhead = oh;
 	VTAILQ_INSERT_TAIL(&oh->objcs, oc, hsh_list);
@@ -821,15 +821,24 @@ HSH_Fail(struct objcore *oc)
 	struct objhead *oh;
 
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
+	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
 	oh = oc->objhead;
 	CHECK_OBJ(oh, OBJHEAD_MAGIC);
 
 	/*
-	 * We have to have either a busy bit, so that HSH_Lookup
-	 * will not consider this oc, or an object hung of the oc
+	 * We either failed before the end of vcl_backend_response
+	 * and a cache miss has the busy bit, so that HSH_Lookup()
+	 * will not consider this oc, or an object hung off the oc
 	 * so that it can consider it.
+	 *
+	 * We can only fail an ongoing fetch in a backend context
+	 * so we can safely check the BOC state as it won't change
+	 * under our feet.
 	 */
-	assert((oc->flags & OC_F_BUSY) || (oc->stobj->stevedore != NULL));
+	if (oc->boc->state < BOS_STREAM)
+		assert(oc->flags & (OC_F_BUSY|OC_F_PRIVATE));
+	else
+		assert(oc->stobj->stevedore != NULL);
 
 	Lck_Lock(&oh->mtx);
 	oc->flags |= OC_F_FAILED;
@@ -909,15 +918,15 @@ HSH_Unbusy(struct worker *wrk, struct objcore *oc)
 	CHECK_OBJ(oh, OBJHEAD_MAGIC);
 
 	AN(oc->stobj->stevedore);
-	AN(oc->flags & OC_F_BUSY);
 	assert(oh->refcnt > 0);
 	assert(oc->refcnt > 0);
 
 	if (oc->flags & OC_F_PRIVATE) {
-		oc->flags &= ~OC_F_BUSY;
+		AZ(oc->flags & OC_F_BUSY);
 		return;
 	}
 
+	AN(oc->flags & OC_F_BUSY);
 	INIT_OBJ(&rush, RUSH_MAGIC);
 
 	BAN_NewObjCore(oc);
diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c
index c85ecd618..4c40d3b8d 100644
--- a/bin/varnishd/cache/cache_obj.c
+++ b/bin/varnishd/cache/cache_obj.c
@@ -140,8 +140,6 @@ ObjNew(const struct worker *wrk)
 	AN(oc);
 	wrk->stats->n_objectcore++;
 	oc->last_lru = NAN;
-	oc->flags = OC_F_BUSY;
-
 	oc->boc = obj_newboc();
 
 	return (oc);


More information about the varnish-commit mailing list