[master] 50aeefc Allow VMODs to hold a reference on a warm VCL

Dridi Boukelmoune dridi.boukelmoune at gmail.com
Fri Dec 4 16:03:22 CET 2015


commit 50aeefcceec15b765b2be0bb45d5c2de057802fe
Author: Dridi Boukelmoune <dridi.boukelmoune at gmail.com>
Date:   Fri Dec 4 15:24:19 2015 +0100

    Allow VMODs to hold a reference on a warm VCL

diff --git a/bin/varnishd/cache/cache_vcl.c b/bin/varnishd/cache/cache_vcl.c
index cdd4124..8f41773 100644
--- a/bin/varnishd/cache/cache_vcl.c
+++ b/bin/varnishd/cache/cache_vcl.c
@@ -61,6 +61,7 @@ struct vcl {
 	char			state[8];
 	char			*loaded_name;
 	unsigned		busy;
+	unsigned		refcount;
 	unsigned		discard;
 	const char		*temp;
 	VTAILQ_HEAD(,backend)	backend_list;
@@ -244,6 +245,7 @@ vcl_KillBackends(struct vcl *vcl)
 
 	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
 	AZ(vcl->busy);
+	AZ(vcl->refcount);
 	while (1) {
 		be = VTAILQ_FIRST(&vcl->backend_list);
 		if (be == NULL)
@@ -366,6 +368,41 @@ VRT_count(VRT_CTX, unsigned u)
 		    ctx->vcl->conf->ref[u].line, ctx->vcl->conf->ref[u].pos);
 }
 
+void
+VRT_ref_vcl(VRT_CTX)
+{
+	struct vcl *vcl;
+
+	ASSERT_CLI();
+	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
+
+	vcl = ctx->vcl;
+	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
+	xxxassert(vcl->temp == vcl_temp_warm);
+
+	Lck_Lock(&vcl_mtx);
+	vcl->refcount++;
+	Lck_Unlock(&vcl_mtx);
+}
+
+void
+VRT_rel_vcl(VRT_CTX)
+{
+	struct vcl *vcl;
+
+	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
+
+	vcl = ctx->vcl;
+	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
+	assert(vcl->temp == vcl_temp_warm || vcl->temp == vcl_temp_cooling);
+
+	Lck_Lock(&vcl_mtx);
+	assert(vcl->refcount > 0);
+	vcl->refcount--;
+	/* No garbage collection here, for the same reasons as in VCL_Rel. */
+	Lck_Unlock(&vcl_mtx);
+}
+
 /*--------------------------------------------------------------------*/
 
 static struct vcl *
@@ -402,7 +439,7 @@ vcl_set_state(struct vcl *vcl, const char *state)
 			vcl->temp = vcl_temp_cold;
 		if (vcl->temp == vcl_temp_cold)
 			break;
-		if (vcl->busy == 0) {
+		if (vcl->busy == 0 && vcl->refcount == 0) {
 			vcl->temp = vcl_temp_cold;
 			AZ(vcl->conf->event_vcl(&ctx, VCL_EVENT_COLD));
 			vcl_BackendEvent(vcl, VCL_EVENT_COLD);
@@ -508,6 +545,7 @@ VCL_Nuke(struct vcl *vcl)
 	assert(vcl != vcl_active);
 	assert(vcl->discard);
 	AZ(vcl->busy);
+	AZ(vcl->refcount);
 	VTAILQ_REMOVE(&vcl_head, vcl, list);
 	ctx.method = VCL_MET_FINI;
 	ctx.handling = &hand;
@@ -531,7 +569,7 @@ VCL_Poll(void)
 	VTAILQ_FOREACH_SAFE(vcl, &vcl_head, list, vcl2) {
 		if (vcl->temp == vcl_temp_cooling)
 			vcl_set_state(vcl, "0");
-		if (vcl->discard && vcl->busy == 0)
+		if (vcl->discard && vcl->busy == 0 && vcl->refcount == 0)
 			VCL_Nuke(vcl);
 	}
 }
@@ -602,7 +640,7 @@ ccf_config_discard(struct cli *cli, const char * const *av, void *priv)
 	vcl->discard = 1;
 	Lck_Unlock(&vcl_mtx);
 
-	if (vcl->busy == 0)
+	if (vcl->busy == 0 && vcl->refcount == 0)
 		VCL_Nuke(vcl);
 }
 
diff --git a/doc/sphinx/reference/vmod.rst b/doc/sphinx/reference/vmod.rst
index fac9e70..ad6e03f 100644
--- a/doc/sphinx/reference/vmod.rst
+++ b/doc/sphinx/reference/vmod.rst
@@ -383,6 +383,13 @@ first with a ``VCL_EVENT_WARM`` event. Unless a user decides that a given VCL
 should always be warm, an inactive VMOD will eventually become cold and should
 manage resources accordingly.
 
+If your VMOD runs an asynchronous background job, you can hold a reference on
+the VCL to prevent it from going cold too soon, and get the same guarantees
+as, for instance, backends with ongoing requests. Acquire the reference by
+calling ``VRT_ref_vcl`` when you receive a ``VCL_EVENT_WARM`` event, and
+release it by calling ``VRT_rel_vcl`` once the background job is over. A
+``VCL_EVENT_COLD`` event is your cue to terminate any job bound to the VCL.
+
 There is also a ``VCL_EVENT_USE`` event. Please note that this event is now
 deprecated and may be removed in a future release. A warm VCL should be ready
 to use so no additional task should be postponed at use time.
@@ -396,7 +403,7 @@ their own locking to protect shared resources.
 When a VCL is loaded or unloaded, the event and priv->free are
 run sequentially all in a single thread, and there is guaranteed
 to be no other activity related to this particular VCL, nor are
-there  init/fini activity in any other VCL or VMOD at this time.
+there init/fini activity in any other VCL or VMOD at this time.
 
 That means that the VMOD init, and any object init/fini functions
 are already serialized in sensible order, and won't need any locking,
diff --git a/include/vrt.h b/include/vrt.h
index 2d0a012..2203bd8 100644
--- a/include/vrt.h
+++ b/include/vrt.h
@@ -295,6 +295,9 @@ struct vmod_priv {
 typedef int vmod_event_f(VRT_CTX, struct vmod_priv *, enum vcl_event_e);
 #endif
 
+void VRT_ref_vcl(VRT_CTX);
+void VRT_rel_vcl(VRT_CTX);
+
 void VRT_priv_fini(const struct vmod_priv *p);
 struct vmod_priv *VRT_priv_task(VRT_CTX, void *vmod_id);
 struct vmod_priv *VRT_priv_top(VRT_CTX, void *vmod_id);
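
As an illustration of the pattern described in the vmod.rst hunk, here is a
minimal sketch of an event function using the new pair. Everything below is
illustrative and not part of the commit: the struct job, the names
event_function and job_thread, and the work the thread performs are made up,
and the throwaway ctx built for the worker thread merely satisfies what
VRT_rel_vcl checks in the patch (a magic-tagged ctx carrying the VCL pointer).

#include <pthread.h>
#include <stdlib.h>

#include "cache/cache.h"	/* VRT_CTX, AN, AZ, INIT_OBJ, ... */
#include "vrt.h"

/* Hypothetical per-VCL job state, hung off a PRIV_VCL vmod_priv. */
struct job {
	pthread_t	thread;
	struct vcl	*vcl;	/* opaque, only handed back to VRT_rel_vcl */
};

static void *
job_thread(void *priv)
{
	struct job *job = priv;
	struct vrt_ctx ctx;

	/* ... perform a finite amount of background work ... */

	/* The job is over: release the reference so the VCL can go cold. */
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	ctx.vcl = job->vcl;
	VRT_rel_vcl(&ctx);
	return (NULL);
}

int
event_function(VRT_CTX, struct vmod_priv *priv, enum vcl_event_e e)
{
	struct job *job;

	switch (e) {
	case VCL_EVENT_LOAD:
		job = calloc(1, sizeof *job);
		AN(job);
		priv->priv = job;
		priv->free = free;	/* reclaimed when the VCL is discarded */
		break;
	case VCL_EVENT_WARM:
		/* Take the reference on the CLI thread, then start the job. */
		job = priv->priv;
		VRT_ref_vcl(ctx);
		job->vcl = ctx->vcl;
		AZ(pthread_create(&job->thread, NULL, job_thread, job));
		break;
	case VCL_EVENT_COLD:
		/* With this patch, COLD is only delivered once the refcount
		 * is back to zero, so the job has already released its
		 * reference; just reap the thread. */
		job = priv->priv;
		AZ(pthread_join(job->thread, NULL));
		break;
	default:
		break;
	}
	return (0);
}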