[master] 3864e66 Move the vbc list into the new tcp_pool class.
Poul-Henning Kamp
phk at FreeBSD.org
Wed Jan 14 09:37:57 CET 2015
commit 3864e664636225fd9a879433dba8cd64d085f2f9
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Wed Jan 14 08:37:16 2015 +0000
Move the vbc list into the new tcp_pool class.
When the first recycled backend connection is found dead (most likely
timed out by the backend), we ditch the rest of the list too, since
the entries behind it are older still.
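
As a minimal standalone illustration of the recycling logic this
commit centralizes in tcp_pool (not Varnish code: it uses plain
sys/queue.h TAILQ macros instead of the VTAILQ wrappers, skips the
pool mutex and the backend_reuse/backend_toolate counters, and all
names such as idle_conn, conn_pool, pool_get and pool_recycle are made
up for the sketch):

#include <poll.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>

struct idle_conn {
	int			fd;
	TAILQ_ENTRY(idle_conn)	list;
};

TAILQ_HEAD(conn_pool, idle_conn);

/* Return a live idle connection, or NULL if the pool is empty/dead. */
struct idle_conn *
pool_get(struct conn_pool *pool)
{
	struct idle_conn *ic;
	struct pollfd pfd;

	ic = TAILQ_FIRST(pool);
	if (ic == NULL)
		return (NULL);

	pfd.fd = ic->fd;
	pfd.events = POLLIN;
	pfd.revents = 0;
	/*
	 * An idle, healthy keep-alive connection has nothing to read,
	 * so a zero-timeout poll() reporting any event (stray data or
	 * EOF) marks it dead.
	 */
	if (poll(&pfd, 1, 0) == 0) {
		TAILQ_REMOVE(pool, ic, list);
		return (ic);		/* reuse it */
	}

	/*
	 * The head is the most recently recycled connection; if even
	 * that one is dead, everything behind it is older and assumed
	 * dead too, so drain the whole list.
	 */
	while ((ic = TAILQ_FIRST(pool)) != NULL) {
		TAILQ_REMOVE(pool, ic, list);
		(void)close(ic->fd);
		free(ic);
	}
	return (NULL);
}

/* Recycling inserts at the head, keeping the list newest-first. */
void
pool_recycle(struct conn_pool *pool, struct idle_conn *ic)
{

	TAILQ_INSERT_HEAD(pool, ic, list);
}
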
diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index a5c2b22..0f02784 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -105,7 +105,6 @@ bes_conn_try(struct busyobj *bo, struct vbc *vc, const struct vbe_dir *vs)
bp->vsc->conn++;
Lck_Unlock(&bp->mtx);
- s = -1;
assert(bp->ipv6 != NULL || bp->ipv4 != NULL);
/* release lock during stuff that can take a long time */
@@ -181,23 +180,25 @@ vbe_GetVbe(struct busyobj *bo, struct vbe_dir *vs)
bp = vs->backend;
CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
+ if (!VBE_Healthy(bp, NULL)) {
+ VSC_C_main->backend_unhealthy++;
+ return (NULL);
+ }
+
/* first look for vbc's we can recycle */
while (1) {
- Lck_Lock(&bp->mtx);
- vc = VTAILQ_FIRST(&bp->connlist);
- if (vc != NULL) {
- bp->refcount++;
- assert(vc->backend == bp);
- assert(vc->fd >= 0);
- AN(vc->addr);
- VTAILQ_REMOVE(&bp->connlist, vc, list);
- }
- Lck_Unlock(&bp->mtx);
+ vc = VBT_Get(bp->tcp_pool);
if (vc == NULL)
break;
+
+ Lck_Lock(&bp->mtx);
+ bp->refcount++;
+ assert(vc->backend == bp);
+ assert(vc->fd >= 0);
+ AN(vc->addr);
+ Lck_Unlock(&bp->mtx);
+
if (vbe_CheckFd(vc->fd)) {
- /* XXX locking of stats */
- VSC_C_main->backend_reuse += 1;
VSLb(bo->vsl, SLT_Backend, "%d %s %s",
vc->fd, bo->director_resp->vcl_name,
bp->display_name);
@@ -205,7 +206,6 @@ vbe_GetVbe(struct busyobj *bo, struct vbe_dir *vs)
vc->recycled = 1;
return (vc);
}
- VSC_C_main->backend_toolate++;
VSLb(bo->vsl, SLT_BackendClose, "%d %s toolate",
vc->fd, bp->display_name);
@@ -215,11 +215,6 @@ vbe_GetVbe(struct busyobj *bo, struct vbe_dir *vs)
VBE_ReleaseConn(vc);
}
- if (!VBE_Healthy(bp, NULL)) {
- VSC_C_main->backend_unhealthy++;
- return (NULL);
- }
-
if (vs->vrt->max_connections > 0 &&
bp->n_conn >= vs->vrt->max_connections) {
VSC_C_main->backend_busy++;
@@ -357,8 +352,8 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
bp->display_name);
Lck_Lock(&bp->mtx);
VSC_C_main->backend_recycle++;
- VTAILQ_INSERT_HEAD(&bp->connlist, bo->htc->vbc, list);
VBE_DropRefLocked(bp, &bo->acct);
+ VBT_Recycle(bp->tcp_pool, &bo->htc->vbc);
}
bo->htc->vbc = NULL;
bo->htc = NULL;
diff --git a/bin/varnishd/cache/cache_backend.h b/bin/varnishd/cache/cache_backend.h
index b7c27e0..89e1649 100644
--- a/bin/varnishd/cache/cache_backend.h
+++ b/bin/varnishd/cache/cache_backend.h
@@ -71,7 +71,6 @@ struct backend {
struct suckaddr *ipv6;
unsigned n_conn;
- VTAILQ_HEAD(, vbc) connlist;
struct vbp_target *probe;
unsigned healthy;
@@ -127,4 +126,7 @@ struct tcp_pool *VBT_Ref(const char *name, const struct suckaddr *ip4,
const struct suckaddr *ip6);
void VBT_Rel(struct tcp_pool **tpp);
int VBT_Open(struct tcp_pool *tp, double tmo, const struct suckaddr **sa);
+void VBT_Recycle(struct tcp_pool *tp, struct vbc **vbc);
+struct vbc *VBT_Get(struct tcp_pool *tp);
+
diff --git a/bin/varnishd/cache/cache_backend_cfg.c b/bin/varnishd/cache/cache_backend_cfg.c
index 29c9be9..bb46761 100644
--- a/bin/varnishd/cache/cache_backend_cfg.c
+++ b/bin/varnishd/cache/cache_backend_cfg.c
@@ -101,7 +101,6 @@ void
VBE_DropRefLocked(struct backend *b, const struct acct_bereq *acct_bereq)
{
int i;
- struct vbc *vbe, *vbe2;
CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC);
assert(b->refcount > 0);
@@ -119,15 +118,6 @@ VBE_DropRefLocked(struct backend *b, const struct acct_bereq *acct_bereq)
return;
ASSERT_CLI();
- VTAILQ_FOREACH_SAFE(vbe, &b->connlist, list, vbe2) {
- VTAILQ_REMOVE(&b->connlist, vbe, list);
- if (vbe->fd >= 0) {
- AZ(close(vbe->fd));
- vbe->fd = -1;
- }
- vbe->backend = NULL;
- VBE_ReleaseConn(vbe);
- }
VBE_Nuke(b);
}
@@ -202,7 +192,6 @@ VBE_AddBackend(struct cli *cli, const struct vrt_backend *vb)
b->vsc = VSM_Alloc(sizeof *b->vsc, VSC_CLASS, VSC_type_vbe, buf);
b->vsc->vcls++;
- VTAILQ_INIT(&b->connlist);
/*
* This backend may live longer than the VCL that instantiated it
diff --git a/bin/varnishd/cache/cache_backend_poll.c b/bin/varnishd/cache/cache_backend_poll.c
index 10fd4c3..319b23b 100644
--- a/bin/varnishd/cache/cache_backend_poll.c
+++ b/bin/varnishd/cache/cache_backend_poll.c
@@ -122,7 +122,6 @@ vbp_poke(struct vbp_target *vt)
t_start = t_now = VTIM_real();
t_end = t_start + vt->probe.timeout;
- tmo = (int)round((t_end - t_now) * 1e3);
s = VBT_Open(bp->tcp_pool, t_end - t_now, &sa);
if (s < 0) {
diff --git a/bin/varnishd/cache/cache_backend_tcp.c b/bin/varnishd/cache/cache_backend_tcp.c
index 175ab48..5495cff 100644
--- a/bin/varnishd/cache/cache_backend_tcp.c
+++ b/bin/varnishd/cache/cache_backend_tcp.c
@@ -54,13 +54,18 @@ struct tcp_pool {
VTAILQ_ENTRY(tcp_pool) list;
int refcnt;
+ struct lock mtx;
+ VTAILQ_HEAD(, vbc) connlist;
+ VTAILQ_HEAD(, vbc) killlist;
};
static VTAILQ_HEAD(, tcp_pool) pools = VTAILQ_HEAD_INITIALIZER(pools);
/*--------------------------------------------------------------------
+ * Reference a TCP pool given by {name, ip4, ip6} triplet. Create if
+ * it doesn't exist already.
*/
struct tcp_pool *
@@ -104,17 +109,22 @@ VBT_Ref(const char *name, const struct suckaddr *ip4,
if (ip6 != NULL)
tp->ip6 = VSA_Clone(ip6);
tp->refcnt = 1;
+ Lck_New(&tp->mtx, lck_backend);
+ VTAILQ_INIT(&tp->connlist);
+ VTAILQ_INIT(&tp->killlist);
VTAILQ_INSERT_HEAD(&pools, tp, list);
return (tp);
}
/*--------------------------------------------------------------------
+ * Release TCP pool, destroy if last reference.
*/
void
VBT_Rel(struct tcp_pool **tpp)
{
struct tcp_pool *tp;
+ struct vbc *vbc, *vbc2;
AN(tpp);
tp = *tpp;
@@ -127,10 +137,27 @@ VBT_Rel(struct tcp_pool **tpp)
free(tp->name);
free(tp->ip4);
free(tp->ip6);
+ Lck_Delete(&tp->mtx);
+ VTAILQ_FOREACH_SAFE(vbc, &tp->connlist, list, vbc2) {
+ VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ vbc->backend = NULL;
+ (void)close(vbc->fd);
+ vbc->fd = -1;
+ VBE_ReleaseConn(vbc);
+ }
+ VTAILQ_FOREACH_SAFE(vbc, &tp->killlist, list, vbc2) {
+ VTAILQ_REMOVE(&tp->killlist, vbc, list);
+ vbc->backend = NULL;
+ (void)close(vbc->fd);
+ vbc->fd = -1;
+ VBE_ReleaseConn(vbc);
+ }
FREE_OBJ(tp);
}
/*--------------------------------------------------------------------
+ * Open a new connection from pool. This is a distinct function since
+ * probing cannot use a recycled connection.
*/
int
@@ -156,3 +183,69 @@ VBT_Open(struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
}
return(s);
}
+
+/*--------------------------------------------------------------------
+ * Recycle a connection.
+ */
+
+void
+VBT_Recycle(struct tcp_pool *tp, struct vbc **vbc)
+{
+
+ CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
+ CHECK_OBJ_NOTNULL((*vbc), VBC_MAGIC);
+
+ Lck_Lock(&tp->mtx);
+ VTAILQ_INSERT_HEAD(&tp->connlist, *vbc, list);
+ Lck_Unlock(&tp->mtx);
+ *vbc = NULL;
+}
+
+/*--------------------------------------------------------------------
+ * Get a connection
+ */
+
+struct vbc *
+VBT_Get(struct tcp_pool *tp)
+{
+ struct vbc *vbc;
+ struct pollfd pfd;
+
+ CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
+
+ Lck_Lock(&tp->mtx);
+ vbc = VTAILQ_FIRST(&tp->connlist);
+ if (vbc != NULL) {
+ CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
+
+ pfd.fd = vbc->fd;
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+ if (poll(&pfd, 1, 0)) {
+ /*
+ * If this vbc is dead assume the rest of the list
+ * has also been chopped from the other end.
+ */
+ VSC_C_main->backend_toolate++;
+ do {
+ VTAILQ_REMOVE(&tp->connlist, vbc, list);
+#if 0
+ VTAILQ_INSERT_TAIL(&tp->killlist, vbc, list);
+#else
+ vbc->backend = NULL;
+ (void)close(vbc->fd);
+ vbc->fd = -1;
+ VBE_ReleaseConn(vbc);
+#endif
+ vbc = VTAILQ_FIRST(&tp->connlist);
+ } while (vbc != NULL);
+ } else {
+ VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ VSC_C_main->backend_reuse += 1;
+ }
+ }
+ Lck_Unlock(&tp->mtx);
+ if (vbc != NULL)
+ return (vbc);
+ return (NULL);
+}
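
The comments added above VBT_Ref() and VBT_Rel() describe a
create-or-reference pattern for sharing one tcp_pool between backends
with the same address. Below is a minimal standalone sketch of that
pattern, assuming a simplified key of just the name (the real code
keys on the {name, ip4, ip6} triplet, takes the pool lock, and drains
the connection lists on destruction); pool_ref and pool_rel are
hypothetical names:

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct pool {
	char			*name;
	int			refcnt;
	TAILQ_ENTRY(pool)	list;
};

static TAILQ_HEAD(, pool) pools = TAILQ_HEAD_INITIALIZER(pools);

/* Look up an existing pool by key, or create one if none matches. */
struct pool *
pool_ref(const char *name)
{
	struct pool *p;

	TAILQ_FOREACH(p, &pools, list) {
		if (!strcmp(p->name, name)) {
			p->refcnt++;	/* existing pool: share it */
			return (p);
		}
	}
	p = calloc(1, sizeof *p);	/* no match: create a new one */
	assert(p != NULL);
	p->name = strdup(name);
	assert(p->name != NULL);
	p->refcnt = 1;
	TAILQ_INSERT_HEAD(&pools, p, list);
	return (p);
}

/* Drop a reference; the last one destroys the pool. */
void
pool_rel(struct pool **pp)
{
	struct pool *p = *pp;

	*pp = NULL;
	assert(p->refcnt > 0);
	if (--p->refcnt > 0)
		return;			/* still shared */
	TAILQ_REMOVE(&pools, p, list);	/* last ref: destroy */
	free(p->name);
	free(p);
}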