[master] 36293d0 Make VMODs actually fail warm-ups

Dridi Boukelmoune dridi.boukelmoune at gmail.com
Mon Jan 18 22:55:38 CET 2016


commit 36293d0d563f62cd07acacd65ce74011aab31106
Author: Dridi Boukelmoune <dridi.boukelmoune at gmail.com>
Date:   Mon Dec 7 11:18:33 2015 +0100

    Make VMODs actually fail warm-ups
    
    The implementation is similar to the load/discard dance performed
    when a load fails. New VGC functions are introduced iff the VCL has
    at least one VMOD handling events.
    
    The generated code looks like this:
    
    	static unsigned vgc_inistep;
    	static unsigned vgc_warmupstep;
    
    	...
    
    	static int
    	VGC_Load(VRT_CTX)
    	{
    		...
    	}
    
    	static int
    	VGC_Discard(VRT_CTX)
    	{
    		...
    	}
    
    	static int
    	VGC_Warmup(VRT_CTX, enum vcl_event_e ev)
    	{
    
    		vgc_warmupstep = 0;
    
    		/* 4 */
    		if (Vmod_debug_Func._event(ctx, &vmod_priv_debug, ev))
    			return (1);
    		vgc_warmupstep = 4;
    
    		return (0);
    	}
    
    	static int
    	VGC_Use(VRT_CTX, enum vcl_event_e ev)
    	{
    
    		/* 4 */
    		if (Vmod_debug_Func._event(ctx, &vmod_priv_debug, ev))
    			return (1);
    
    		return (0);
    	}
    
    	static int
    	VGC_Cooldown(VRT_CTX, enum vcl_event_e ev)
    	{
    		int retval = 0;
    
    		/* 4 */
    		if (vgc_warmupstep >= 4 &&
    		    Vmod_debug_Func._event(ctx, &vmod_priv_debug, ev) != 0)
    			retval = 1;
    
    		return (retval);
    	}
    
    	static int
    	VGC_Event(VRT_CTX, enum vcl_event_e ev)
    	{
    		if (ev == VCL_EVENT_LOAD)
    			return(VGC_Load(ctx));
    		if (ev == VCL_EVENT_WARM)
    			return(VGC_Warmup(ctx, ev));
    		if (ev == VCL_EVENT_USE)
    			return(VGC_Use(ctx, ev));
    		if (ev == VCL_EVENT_COLD)
    			return(VGC_Cooldown(ctx, ev));
    		if (ev == VCL_EVENT_DISCARD)
    			return(VGC_Discard(ctx));
    
    		return (1);
    	}
    
    However, if no VMOD handles events, none of the new functions are
    generated, and the code looks like this:
    
    	static unsigned vgc_inistep;
    	static unsigned vgc_warmupstep;
    
    	...
    
    	static int
    	VGC_Load(VRT_CTX)
    	{
    		...
    	}
    
    	static int
    	VGC_Discard(VRT_CTX)
    	{
    		...
    	}
    
    	static int
    	VGC_Event(VRT_CTX, enum vcl_event_e ev)
    	{
    		if (ev == VCL_EVENT_LOAD)
    			return(VGC_Load(ctx));
    		if (ev == VCL_EVENT_DISCARD)
    			return(VGC_Discard(ctx));
    
    		(void)vgc_warmupstep;
    		return (0);
    	}

diff --git a/doc/sphinx/reference/vmod.rst b/doc/sphinx/reference/vmod.rst
index 57fa09c..c6eb274 100644
--- a/doc/sphinx/reference/vmod.rst
+++ b/doc/sphinx/reference/vmod.rst
@@ -383,6 +383,13 @@ first with a ``VCL_EVENT_WARM`` event. Unless a user decides that a given VCL
 should always be warm, an inactive VMOD will eventually become cold and should
 manage resources accordingly.
 
+An event function must return zero upon success. It is only possible to fail
+an initialization with the ``VCL_EVENT_LOAD`` or ``VCL_EVENT_WARM`` events.
+Should such a failure happen, a ``VCL_EVENT_DISCARD`` or ``VCL_EVENT_COLD``
+event will be sent to the VMODs that succeeded, in order to put them back
+in a cold state. The VMOD that failed will not receive this event, and must
+therefore not leave itself half-initialized when a failure occurs.
+
 If your VMOD is running an asynchronous background job you can hold a reference
 to the VCL to prevent it from going cold too soon and get the same guarantees
 as backends with ongoing requests for instance. For that, you must acquire the
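
For illustration, an event function honoring this contract could look
like the following sketch (the signature matches the vmod_debug call
sites in the commit message above; the allocation, the size and the
names are hypothetical):

	#include <stdlib.h>

	#include "vrt.h"	/* VRT_CTX, struct vmod_priv, enum vcl_event_e */

	int
	event_function(VRT_CTX, struct vmod_priv *priv, enum vcl_event_e e)
	{

		(void)ctx;

		switch (e) {
		case VCL_EVENT_WARM:
			/* Acquire resources; a non-zero return fails the
			 * warm-up, and the VMODs that already warmed up
			 * receive a VCL_EVENT_COLD event.
			 */
			priv->priv = malloc(1024);	/* hypothetical */
			return (priv->priv == NULL ? 1 : 0);
		case VCL_EVENT_COLD:
			/* Release what VCL_EVENT_WARM acquired, leaving
			 * nothing half-initialized behind.
			 */
			free(priv->priv);
			priv->priv = NULL;
			return (0);
		default:
			return (0);
		}
	}
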
diff --git a/lib/libvcc/vcc_compile.c b/lib/libvcc/vcc_compile.c
index 62aaeae..da6d8b4 100644
--- a/lib/libvcc/vcc_compile.c
+++ b/lib/libvcc/vcc_compile.c
@@ -325,20 +325,22 @@ EmitCoordinates(const struct vcc *tl, struct vsb *vsb)
 /*--------------------------------------------------------------------
  * Init/Fini/Event
  *
- * We call Fini-s in the opposite order of init-s.
- * Other events are called in same order as init-s, no matter which
- * event it might be.
+ * We call DISCARD and COLD events in the opposite order of LOAD and
+ * WARM.
  */
 
 static void
 EmitInitFini(const struct vcc *tl)
 {
 	struct inifin *p;
+	unsigned has_event = 0;
 
-	Fh(tl, 0, "\nstatic unsigned vgc_inistep;\n");
+	Fh(tl, 0, "\n");
+	Fh(tl, 0, "static unsigned vgc_inistep;\n");
+	Fh(tl, 0, "static unsigned vgc_warmupstep;\n");
 
 	/*
-	 * INIT
+	 * LOAD
 	 */
 	Fc(tl, 0, "\nstatic int\nVGC_Load(VRT_CTX)\n{\n\n");
 	Fc(tl, 0, "\tvgc_inistep = 0;\n\n");
@@ -349,6 +351,10 @@ EmitInitFini(const struct vcc *tl)
 			Fc(tl, 0, "\t/* %u */\n%s\n", p->n, VSB_data(p->ini));
 		Fc(tl, 0, "\tvgc_inistep = %u;\n\n", p->n);
 		VSB_delete(p->ini);
+
+		AZ(VSB_finish(p->event));
+		if (VSB_len(p->event))
+			has_event = 1;
 	}
 
 	Fc(tl, 0, "\t(void)VGC_function_vcl_init(ctx);\n");
@@ -356,7 +362,7 @@ EmitInitFini(const struct vcc *tl)
 	Fc(tl, 0, "}\n");
 
 	/*
-	 * FINI
+	 * DISCARD
 	 */
 	Fc(tl, 0, "\nstatic int\nVGC_Discard(VRT_CTX)\n{\n\n");
 
@@ -375,6 +381,66 @@ EmitInitFini(const struct vcc *tl)
 	Fc(tl, 0, "\treturn(0);\n");
 	Fc(tl, 0, "}\n");
 
+	if (has_event) {
+		/*
+		 * WARM
+		 */
+		Fc(tl, 0, "\nstatic int\n");
+		Fc(tl, 0, "VGC_Warmup(VRT_CTX, enum vcl_event_e ev)\n{\n\n");
+
+		Fc(tl, 0, "\tvgc_warmupstep = 0;\n\n");
+		VTAILQ_FOREACH(p, &tl->inifin, list) {
+			assert(p->n > 0);
+			if (VSB_len(p->event)) {
+				Fc(tl, 0, "\t/* %u */\n", p->n);
+				Fc(tl, 0, "\tif (%s)\n", VSB_data(p->event));
+				Fc(tl, 0, "\t\treturn (1);\n");
+				Fc(tl, 0, "\tvgc_warmupstep = %u;\n\n", p->n);
+			}
+		}
+
+		Fc(tl, 0, "\treturn (0);\n");
+		Fc(tl, 0, "}\n");
+
+		/*
+		 * USE (deprecated)
+		 */
+		Fc(tl, 0, "\nstatic int\n");
+		Fc(tl, 0, "VGC_Use(VRT_CTX, enum vcl_event_e ev)\n{\n\n");
+
+		VTAILQ_FOREACH(p, &tl->inifin, list) {
+			assert(p->n > 0);
+			if (VSB_len(p->event)) {
+				Fc(tl, 0, "\t/* %u */\n", p->n);
+				Fc(tl, 0, "\tif (%s)\n", VSB_data(p->event));
+				Fc(tl, 0, "\t\treturn (1);\n\n");
+			}
+		}
+
+		Fc(tl, 0, "\treturn (0);\n");
+		Fc(tl, 0, "}\n");
+
+		/*
+		 * COLD
+		 */
+		Fc(tl, 0, "\nstatic int\n");
+		Fc(tl, 0, "VGC_Cooldown(VRT_CTX, enum vcl_event_e ev)\n{\n");
+		Fc(tl, 0, "\tint retval = 0;\n\n");
+
+		VTAILQ_FOREACH_REVERSE(p, &tl->inifin, inifinhead, list) {
+			if (VSB_len(p->event)) {
+				Fc(tl, 0, "\t/* %u */\n", p->n);
+				Fc(tl, 0, "\tif (vgc_warmupstep >= %u &&\n", p->n);
+				Fc(tl, 0, "\t    %s != 0)\n", VSB_data(p->event));
+				Fc(tl, 0, "\t\tretval = 1;\n\n");
+			}
+			VSB_delete(p->event);
+		}
+
+		Fc(tl, 0, "\treturn (retval);\n");
+		Fc(tl, 0, "}\n");
+	}
+
 	/*
 	 * EVENTS
 	 */
@@ -383,16 +449,20 @@ EmitInitFini(const struct vcc *tl)
 	Fc(tl, 0, "{\n");
 	Fc(tl, 0, "\tif (ev == VCL_EVENT_LOAD)\n");
 	Fc(tl, 0, "\t\treturn(VGC_Load(ctx));\n");
+	if (has_event) {
+		Fc(tl, 0, "\tif (ev == VCL_EVENT_WARM)\n");
+		Fc(tl, 0, "\t\treturn(VGC_Warmup(ctx, ev));\n");
+		Fc(tl, 0, "\tif (ev == VCL_EVENT_USE)\n");
+		Fc(tl, 0, "\t\treturn(VGC_Use(ctx, ev));\n");
+		Fc(tl, 0, "\tif (ev == VCL_EVENT_COLD)\n");
+		Fc(tl, 0, "\t\treturn(VGC_Cooldown(ctx, ev));\n");
+	}
 	Fc(tl, 0, "\tif (ev == VCL_EVENT_DISCARD)\n");
 	Fc(tl, 0, "\t\treturn(VGC_Discard(ctx));\n");
 	Fc(tl, 0, "\n");
-	VTAILQ_FOREACH(p, &tl->inifin, list) {
-		AZ(VSB_finish(p->event));
-		if (VSB_len(p->event))
-			Fc(tl, 0, "\t/* %u */\n%s\n", p->n, VSB_data(p->event));
-		VSB_delete(p->event);
-	}
-	Fc(tl, 0, "\treturn (0);\n");
+	if (!has_event)
+		Fc(tl, 0, "\t(void)vgc_warmupstep;\n");
+	Fc(tl, 0, "\treturn (%d);\n", has_event ? 1 : 0);
 	Fc(tl, 0, "}\n");
 }
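
A worked example of the cooldown guard, assuming a hypothetical VCL
importing two event-handling VMODs at steps 4 and 7 (names invented):
if the second VMOD fails its warm-up, vgc_warmupstep stops at 4, so the
reverse-order cooldown only notifies the VMOD that actually warmed up:

	/* 7 */
	if (vgc_warmupstep >= 7 &&
	    Vmod_two_Func._event(ctx, &vmod_priv_two, ev) != 0)
		retval = 1;

	/* 4 */
	if (vgc_warmupstep >= 4 &&
	    Vmod_one_Func._event(ctx, &vmod_priv_one, ev) != 0)
		retval = 1;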
 
diff --git a/lib/libvcc/vcc_vmod.c b/lib/libvcc/vcc_vmod.c
index d1b8f26..3a522a5 100644
--- a/lib/libvcc/vcc_vmod.c
+++ b/lib/libvcc/vcc_vmod.c
@@ -228,8 +228,7 @@ vcc_ParseImport(struct vcc *tl)
 			VSB_printf(ifp->fin,
 			    "\t\t(void)%s(ctx, &vmod_priv_%.*s,\n"
 			    "\t\t    VCL_EVENT_DISCARD);\n", p, PF(mod));
-			VSB_printf(ifp->event,
-			    "\t(void)%s(ctx, &vmod_priv_%.*s, ev);\n",
+			VSB_printf(ifp->event, "\t%s(ctx, &vmod_priv_%.*s, ev)",
 			    p, PF(mod));
 		} else {
 			sym = VCC_AddSymbolStr(tl, p, SYM_FUNC);
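
For context, the ifp->event buffer used to hold a complete statement
with the return value cast away, emitted verbatim inside VGC_Event:

	(void)Vmod_debug_Func._event(ctx, &vmod_priv_debug, ev);

It now holds a bare call expression, which is what allows EmitInitFini()
to embed it in a condition and act on the return value, as in the
generated warm-up shown in the commit message:

	if (Vmod_debug_Func._event(ctx, &vmod_priv_debug, ev))
		return (1);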


