[master] 00871ce Give TASK_QUEUE_ enum names descriptive of what is being queued (BO, REQ & VCA)
Poul-Henning Kamp
phk at FreeBSD.org
Tue Jun 9 22:31:25 CEST 2015
commit 00871cef5322f39807c819e52a00013703e2bdb4
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Tue Jun 9 20:28:05 2015 +0000
Give TASK_QUEUE_ enum names descriptive of what is being queued
(BO, REQ & VCA)
Add a new TASK_QUEUE level for backend requests and give it
higher priority than client requests to avoid a priority inversion
when (almost) all threads are stuck waiting for backend fetches,
which don't complete because there are no worker threads to run them.
Backend polls are left at TASK_QUEUE_REQ priority for now, pending
reasons not to.
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index e36d9b7..a29d998 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -314,8 +314,9 @@ struct pool_task {
};
enum task_how {
- TASK_QUEUE_FRONT,
- TASK_QUEUE_BACK,
+ TASK_QUEUE_BO,
+ TASK_QUEUE_REQ,
+ TASK_QUEUE_VCA,
TASK_QUEUE_END
};
diff --git a/bin/varnishd/cache/cache_acceptor.c b/bin/varnishd/cache/cache_acceptor.c
index 479754e..90965c3 100644
--- a/bin/varnishd/cache/cache_acceptor.c
+++ b/bin/varnishd/cache/cache_acceptor.c
@@ -437,7 +437,7 @@ vca_accept_task(struct worker *wrk, void *arg)
* must reschedule the listening task so it will be
* taken up by another thread again.
*/
- AZ(Pool_Task(wrk->pool, &ps->task, TASK_QUEUE_BACK));
+ AZ(Pool_Task(wrk->pool, &ps->task, TASK_QUEUE_VCA));
return;
}
@@ -468,7 +468,7 @@ VCA_NewPool(struct pool *pp)
ps->task.func = vca_accept_task;
ps->task.priv = ps;
ps->pool = pp;
- AZ(Pool_Task(pp, &ps->task, TASK_QUEUE_BACK));
+ AZ(Pool_Task(pp, &ps->task, TASK_QUEUE_VCA));
}
}
diff --git a/bin/varnishd/cache/cache_backend_poll.c b/bin/varnishd/cache/cache_backend_poll.c
index 0d06693..c658417 100644
--- a/bin/varnishd/cache/cache_backend_poll.c
+++ b/bin/varnishd/cache/cache_backend_poll.c
@@ -363,7 +363,7 @@ vbp_thread(struct worker *wrk, void *priv)
vt->task.func = vbp_task;
vt->task.priv = vt;
- if (Pool_Task_Any(&vt->task, TASK_QUEUE_FRONT)) {
+ if (Pool_Task_Any(&vt->task, TASK_QUEUE_REQ)) {
Lck_Lock(&vbp_mtx);
vt->running = 0;
Lck_Unlock(&vbp_mtx);
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index 73ad0a4..3e13dd2 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -1003,7 +1003,7 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
bo->fetch_task.priv = bo_fetch;
bo->fetch_task.func = vbf_fetch_thread;
- if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_FRONT)) {
+ if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_BO)) {
wrk->stats->fetch_no_thread++;
(void)vbf_stp_fail(req->wrk, bo);
if (bo->stale_oc != NULL)
diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c
index 30f4b08..e19c0b9 100644
--- a/bin/varnishd/cache/cache_session.c
+++ b/bin/varnishd/cache/cache_session.c
@@ -415,7 +415,7 @@ SES_Reschedule_Req(struct req *req)
req->task.func = SES_Proto_Req;
req->task.priv = req;
- return (Pool_Task(pp, &req->task, TASK_QUEUE_FRONT));
+ return (Pool_Task(pp, &req->task, TASK_QUEUE_REQ));
}
/*--------------------------------------------------------------------
@@ -451,7 +451,7 @@ ses_handle(struct waited *wp, enum wait_event ev, double now)
tp = (void*)sp->ws->f;
tp->func = SES_Proto_Sess;
tp->priv = sp;
- if (Pool_Task(pp, tp, TASK_QUEUE_FRONT))
+ if (Pool_Task(pp, tp, TASK_QUEUE_REQ))
SES_Delete(sp, SC_OVERLOAD, now);
break;
case WAITER_CLOSE:
diff --git a/bin/varnishd/cache/cache_wrk.c b/bin/varnishd/cache/cache_wrk.c
index 2337ab9..7167303 100644
--- a/bin/varnishd/cache/cache_wrk.c
+++ b/bin/varnishd/cache/cache_wrk.c
@@ -226,7 +226,7 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_how how)
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
AN(task);
AN(task->func);
- assert(how >= TASK_QUEUE_FRONT && how <= TASK_QUEUE_BACK);
+ assert(how >= TASK_QUEUE_BO && how <= TASK_QUEUE_VCA);
Lck_Lock(&pp->mtx);
@@ -244,7 +244,7 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_how how)
}
/* Acceptors are not subject to queue limits */
- if (how == TASK_QUEUE_BACK ||
+ if (how == TASK_QUEUE_VCA ||
pp->lqueue < cache_param->wthread_max +
cache_param->wthread_queue_limit + pp->nthr) {
pp->nqueued++;
More information about the varnish-commit
mailing list