[4.1] 199d1ca keep a reserve of idle threads for vital tasks

Pål Hermunn Johansen hermunn at varnish-software.com
Tue Nov 22 14:31:05 CET 2016


commit 199d1ca4be308ddd9333fac1878a8d3e4707ef2a
Author: Nils Goroll <nils.goroll at uplex.de>
Date:   Thu Oct 27 09:33:02 2016 +0200

    keep a reserve of idle threads for vital tasks
    
    (backend tasks for the time being)
    
    Add parameter thread_pool_reserve to tweak
    
    Conflicts:
    	bin/varnishd/mgt/mgt_pool.c

diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 6ac1972..1ea38e2 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -314,9 +314,14 @@ struct pool_task {
 	void				*priv;
 };
 
-/* tasks are taken off the queues in this order */
+/*
+ * tasks are taken off the queues in this order
+ *
+ * prios up to TASK_QUEUE_RESERVE are run from the reserve
+ */
 enum task_prio {
 	TASK_QUEUE_BO,
+#define TASK_QUEUE_RESERVE	TASK_QUEUE_BO
 	TASK_QUEUE_REQ,
 	TASK_QUEUE_VCA,
 	TASK_QUEUE_END
@@ -889,7 +894,7 @@ const char *sess_close_2str(enum sess_close sc, int want_desc);
 
 /* cache_pool.c */
 int Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how);
-int Pool_Task_Arg(struct worker *, task_func_t *,
+int Pool_Task_Arg(struct worker *, enum task_prio, task_func_t *,
     const void *arg, size_t arg_len);
 void Pool_Sumstat(struct worker *w);
 int Pool_TrySumstat(struct worker *wrk);
diff --git a/bin/varnishd/cache/cache_acceptor.c b/bin/varnishd/cache/cache_acceptor.c
index e718374..5b2827a 100644
--- a/bin/varnishd/cache/cache_acceptor.c
+++ b/bin/varnishd/cache/cache_acceptor.c
@@ -438,7 +438,8 @@ vca_accept_task(struct worker *wrk, void *arg)
 
 		wa.acceptsock = i;
 
-		if (!Pool_Task_Arg(wrk, vca_make_session, &wa, sizeof wa)) {
+		if (!Pool_Task_Arg(wrk, TASK_QUEUE_VCA,
+		    vca_make_session, &wa, sizeof wa)) {
 			/*
 			 * We couldn't get another thread, so we will handle
 			 * the request in this worker thread, but first we
diff --git a/bin/varnishd/cache/cache_pool.h b/bin/varnishd/cache/cache_pool.h
index d3b9e70..239b9e3 100644
--- a/bin/varnishd/cache/cache_pool.h
+++ b/bin/varnishd/cache/cache_pool.h
@@ -42,6 +42,7 @@ struct pool {
 	pthread_t			herder_thr;
 
 	struct lock			mtx;
+	unsigned			nidle;
 	struct taskhead			idle_queue;
 	struct taskhead			queues[TASK_QUEUE_END];
 	unsigned			nthr;
diff --git a/bin/varnishd/cache/cache_wrk.c b/bin/varnishd/cache/cache_wrk.c
index 01f5902..ede1329 100644
--- a/bin/varnishd/cache/cache_wrk.c
+++ b/bin/varnishd/cache/cache_wrk.c
@@ -148,17 +148,35 @@ pool_addstat(struct dstat *dst, struct dstat *src)
 	memset(src, 0, sizeof *src);
 }
 
+static inline int
+pool_reserve(void)
+{
+	unsigned lim;
+
+	if (cache_param->wthread_reserve == 0)
+		return (cache_param->wthread_min / 20 + 1);
+	lim = cache_param->wthread_min * 950 / 1000;
+	if (cache_param->wthread_reserve > lim)
+		return (lim);
+	return (cache_param->wthread_reserve);
+}
+
 /*--------------------------------------------------------------------*/
 
 static struct worker *
-pool_getidleworker(struct pool *pp)
+pool_getidleworker(struct pool *pp, enum task_prio how)
 {
-	struct pool_task *pt;
+	struct pool_task *pt = NULL;
 	struct worker *wrk;
 
 	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
 	Lck_AssertHeld(&pp->mtx);
-	pt = VTAILQ_FIRST(&pp->idle_queue);
+	if (how <= TASK_QUEUE_RESERVE || pp->nidle > pool_reserve()) {
+		pt = VTAILQ_FIRST(&pp->idle_queue);
+		if (pt == NULL)
+			AZ(pp->nidle);
+	}
+
 	if (pt == NULL) {
 		if (pp->nthr < cache_param->wthread_max) {
 			pp->dry++;
@@ -179,7 +197,7 @@ pool_getidleworker(struct pool *pp)
  */
 
 int
-Pool_Task_Arg(struct worker *wrk, task_func_t *func,
+Pool_Task_Arg(struct worker *wrk, enum task_prio how, task_func_t *func,
     const void *arg, size_t arg_len)
 {
 	struct pool *pp;
@@ -193,9 +211,11 @@ Pool_Task_Arg(struct worker *wrk, task_func_t *func,
 	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
 
 	Lck_Lock(&pp->mtx);
-	wrk2 = pool_getidleworker(pp);
+	wrk2 = pool_getidleworker(pp, how);
 	if (wrk2 != NULL) {
+		AN(pp->nidle);
 		VTAILQ_REMOVE(&pp->idle_queue, &wrk2->task, list);
+		pp->nidle--;
 		retval = 1;
 	} else {
 		wrk2 = wrk;
@@ -222,7 +242,6 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how)
 {
 	struct worker *wrk;
 	int retval = 0;
-
 	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
 	AN(task);
 	AN(task->func);
@@ -232,9 +251,11 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how)
 
 	/* The common case first:  Take an idle thread, do it. */
 
-	wrk = pool_getidleworker(pp);
+	wrk = pool_getidleworker(pp, how);
 	if (wrk != NULL) {
+		AN(pp->nidle);
 		VTAILQ_REMOVE(&pp->idle_queue, &wrk->task, list);
+		pp->nidle--;
 		AZ(wrk->task.func);
 		wrk->task.func = task->func;
 		wrk->task.priv = task->priv;
@@ -282,7 +303,7 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 {
 	struct pool_task *tp;
 	struct pool_task tpx, tps;
-	int i;
+	int i, prio_lim;
 
 	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
 	wrk->pool = pp;
@@ -294,7 +315,12 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 		WS_Reset(wrk->aws, NULL);
 		AZ(wrk->vsl);
 
-		for (i = 0; i < TASK_QUEUE_END; i++) {
+		if (pp->nidle < pool_reserve())
+			prio_lim = TASK_QUEUE_RESERVE + 1;
+		else
+			prio_lim = TASK_QUEUE_END;
+
+		for (i = 0; i < prio_lim; i++) {
 			tp = VTAILQ_FIRST(&pp->queues[i]);
 			if (tp != NULL) {
 				pp->lqueue--;
@@ -323,6 +349,7 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 			wrk->task.func = NULL;
 			wrk->task.priv = wrk;
 			VTAILQ_INSERT_HEAD(&pp->idle_queue, &wrk->task, list);
+			pp->nidle++;
 			do {
 				i = Lck_CondWait(&wrk->cond, &pp->mtx,
 				    wrk->vcl == NULL ?  0 : wrk->lastused+60.);
@@ -468,6 +495,7 @@ pool_herder(void *priv)
 			wrk = NULL;
 			pt = VTAILQ_LAST(&pp->idle_queue, taskhead);
 			if (pt != NULL) {
+				AN(pp->nidle);
 				AZ(pt->func);
 				CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);
 
@@ -476,6 +504,7 @@ pool_herder(void *priv)
 					/* Give it a kiss on the cheek... */
 					VTAILQ_REMOVE(&pp->idle_queue,
 					    &wrk->task, list);
+					pp->nidle--;
 					wrk->task.func = pool_kiss_of_death;
 					AZ(pthread_cond_signal(&wrk->cond));
 				} else {
diff --git a/bin/varnishd/common/params.h b/bin/varnishd/common/params.h
index 678ed26..9eabc2c 100644
--- a/bin/varnishd/common/params.h
+++ b/bin/varnishd/common/params.h
@@ -91,6 +91,7 @@ struct params {
 	/* Worker threads and pool */
 	unsigned		wthread_min;
 	unsigned		wthread_max;
+	unsigned		wthread_reserve;
 	double			wthread_timeout;
 	unsigned		wthread_pools;
 	double			wthread_add_delay;
diff --git a/bin/varnishd/mgt/mgt_pool.c b/bin/varnishd/mgt/mgt_pool.c
index 724087a..beb5f56 100644
--- a/bin/varnishd/mgt/mgt_pool.c
+++ b/bin/varnishd/mgt/mgt_pool.c
@@ -55,6 +55,7 @@
 
 static char min_val[20];
 static char max_val[20];
+static char reserve_max_val[20];
 
 static int
 tweak_thread_pool_min(struct vsb *vsb, const struct parspec *par,
@@ -64,10 +65,18 @@ tweak_thread_pool_min(struct vsb *vsb, const struct parspec *par,
 
 	if (tweak_generic_uint(vsb, par->priv, arg, par->min, par->max))
 		return (-1);
+
 	AN(VSB_new(&v2, min_val, sizeof min_val, 0));
 	AZ(tweak_generic_uint(&v2, &mgt_param.wthread_min, NULL, NULL, NULL));
 	AZ(VSB_finish(&v2));
 	MCF_SetMinimum("thread_pool_max", min_val);
+
+	unsigned t = mgt_param.wthread_min * 19 / 20;
+	AN(VSB_new(&v2, reserve_max_val, sizeof reserve_max_val, 0));
+	AZ(tweak_generic_uint(&v2, &t, NULL, NULL, NULL));
+	AZ(VSB_finish(&v2));
+	MCF_SetMaximum("thread_pool_reserve", reserve_max_val);
+
 	return (0);
 }
 
@@ -125,6 +134,25 @@ struct parspec WRK_parspec[] = {
 		"Minimum is 10 threads.",
 		DELAYED_EFFECT,
 		"100", "threads" },
+	{ "thread_pool_reserve", tweak_uint, &mgt_param.wthread_reserve,
+		0, NULL,
+		"The number of worker threads reserved for vital tasks "
+		"in each pool.\n"
+		"\n"
+		"Tasks may require other tasks to complete (for example, "
+		"client requests may require backend requests). This reserve "
+		"is to ensure that such tasks still get to run even under high "
+		"load.\n"
+		"\n"
+		"Increasing the reserve may help setups with a high number of "
+		"backend requests at the expense of client performance. "
+		"Setting it too high will waste resources by keeping threads "
+		"unused.\n"
+		"\n"
+		"Default is 0 to auto-tune (currently 5% of thread_pool_min).\n"
+		"Minimum is 1 otherwise, maximum is 95% of thread_pool_min.",
+		DELAYED_EFFECT,
+		"0", "threads" },
 	{ "thread_pool_timeout",
 		tweak_timeout, &mgt_param.wthread_timeout,
 		"10", NULL,
diff --git a/bin/varnishtest/tests/c00079.vtc b/bin/varnishtest/tests/c00079.vtc
new file mode 100644
index 0000000..a0d4c2e
--- /dev/null
+++ b/bin/varnishtest/tests/c00079.vtc
@@ -0,0 +1,23 @@
+varnishtest "thread_pool_reserve max adjustment"
+
+server s1 {
+} -start
+
+varnish v1 \
+	-arg "-p thread_pool_min=10" \
+	-arg "-p thread_pool_max=100" \
+	-arg "-p thread_pools=1" \
+	-arg "-p thread_pool_timeout=10" \
+	-vcl+backend {}
+varnish v1 -start
+
+varnish v1 -cliok "param.set thread_pool_reserve 0"
+varnish v1 -cliok "param.set thread_pool_reserve 1"
+varnish v1 -cliok "param.set thread_pool_reserve 9"
+varnish v1 -clierr 106 "param.set thread_pool_reserve 10"
+
+varnish v1 -cliok "param.set thread_pool_min 100"
+varnish v1 -cliok "param.set thread_pool_reserve 0"
+varnish v1 -cliok "param.set thread_pool_reserve 1"
+varnish v1 -cliok "param.set thread_pool_reserve 95"
+varnish v1 -clierr 106 "param.set thread_pool_reserve 96"
diff --git a/include/tbl/params.h b/include/tbl/params.h
index c68500b..9a272a3 100644
--- a/include/tbl/params.h
+++ b/include/tbl/params.h
@@ -1157,6 +1157,34 @@ PARAM(
 	/* l-text */	"",
 	/* func */	NULL
 )
+/* actual location mgt_pool.c */
+PARAM(
+	/* name */	thread_pool_reserve,
+	/* typ */	thread_pool_reserve,
+	/* min */	NULL,
+	/* max */	NULL,
+	/* default */	"0",
+	/* units */	"threads",
+	/* flags */	DELAYED_EFFECT| EXPERIMENTAL,
+	/* s-text */
+	"The number of worker threads reserved for vital tasks "
+	"in each pool.\n"
+	"\n"
+	"Tasks may require other tasks to complete (for example, "
+	"client requests may require backend requests). This reserve "
+	"is to ensure that such tasks still get to run even under high "
+	"load.\n"
+	"\n"
+	"Increasing the reserve may help setups with a high number of "
+	"backend requests at the expense of client performance. "
+	"Setting it too high will waste resources by keeping threads "
+	"unused.\n"
+	"\n"
+	"Default is 0 to auto-tune (currently 5% of thread_pool_min).\n"
+	"Minimum is 1 otherwise, maximum is 95% of thread_pool_min.",
+	/* l-text */	"",
+	/* func */	NULL
+)
 
 /* actual location mgt_pool.c */
 PARAM(



More information about the varnish-commit mailing list