[master] b46b256 Roll tcp pools all the way in.

Poul-Henning Kamp
phk at FreeBSD.org
Mon Jan 19 10:58:57 CET 2015


commit b46b25679f237e1276dc238b03cd7534efdb7a28
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Mon Jan 19 09:58:11 2015 +0000

    Roll tcp pools all the way in.

    This also retires the vbc_mempool, which doesn't seem to improve things.
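
For context, the pattern this change consolidates into cache_backend_tcp.c is the
usual idle-connection pool: hand out a recycled fd if a zero-timeout poll() shows
no pending events (backends are not allowed to pipeline, so any readable data means
the connection cannot be reused), otherwise open a fresh one, and have the caller
either recycle or close the fd when the fetch is done. The following is a minimal,
self-contained sketch of that pattern; the names (conn_pool, pool_get, pool_recycle,
pool_close) are invented for illustration and are not the Varnish API, and it assumes
a single thread where the real code holds tp->mtx.

/*
 * Minimal sketch of an idle TCP connection pool.  All names are
 * hypothetical; only the shape of the logic mirrors this commit.
 */
#include <netinet/in.h>
#include <poll.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

struct pooled_conn {
        int                      fd;
        struct pooled_conn      *next;
};

struct conn_pool {
        struct sockaddr_in       addr;   /* backend address */
        struct pooled_conn      *idle;   /* LIFO of recycled connections */
        int                      n_used; /* connections currently handed out */
};

/* A recycled fd is usable only if a zero-timeout poll() reports nothing. */
static int
conn_still_usable(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        return (poll(&pfd, 1, 0) == 0);
}

/* Get a connection: recycle an idle one if possible, else open a new one. */
static int
pool_get(struct conn_pool *cp)
{
        struct pooled_conn *pc;
        int fd;

        while ((pc = cp->idle) != NULL) {
                cp->idle = pc->next;
                fd = pc->fd;
                free(pc);
                if (conn_still_usable(fd)) {
                        cp->n_used++;
                        return (fd);
                }
                (void)close(fd);        /* peer went away: drop and retry */
        }

        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
                return (-1);
        if (connect(fd, (struct sockaddr *)&cp->addr, sizeof cp->addr) != 0) {
                (void)close(fd);
                return (-1);
        }
        cp->n_used++;
        return (fd);
}

/* Hand a healthy connection back for later reuse. */
static void
pool_recycle(struct conn_pool *cp, int fd)
{
        struct pooled_conn *pc = malloc(sizeof *pc);

        if (pc == NULL) {
                (void)close(fd);        /* cannot queue it: just close */
        } else {
                pc->fd = fd;
                pc->next = cp->idle;
                cp->idle = pc;
        }
        cp->n_used--;
}

/* Close a connection that must not be reused (e.g. "Connection: close"). */
static void
pool_close(struct conn_pool *cp, int fd)
{
        (void)close(fd);
        cp->n_used--;
}

A caller fills in cp->addr, takes an fd from pool_get() before a fetch, and returns
it with pool_recycle() on clean reuse or pool_close() otherwise, which is the same
shape as vbe_dir_getfd() calling VBT_Get() and vbe_dir_finish() calling VBT_Recycle()
or VBT_Close() after this commit, minus the connect timeout, locking and per-pool
accounting that the sketch omits.
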
diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index 1fac6d8..163fabe 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -42,11 +42,6 @@
#include "cache_backend.h"
#include "cache_director.h"
#include "vrt.h"
-#include "vtcp.h"
-
-static struct mempool *vbcpool;
-
-static unsigned vbcps = sizeof(struct vbc);
/*--------------------------------------------------------------------
* The "simple" director really isn't, since thats where all the actual
@@ -62,19 +57,6 @@ struct vbe_dir {
const struct vrt_backend *vrt;
};
-/*--------------------------------------------------------------------*/
-
-/* Private interface from backend_cfg.c */
-void
-VBE_ReleaseConn(struct vbc *vc)
-{
-
- CHECK_OBJ_NOTNULL(vc, VBC_MAGIC);
- assert(vc->backend == NULL);
- assert(vc->fd < 0);
- MPL_Free(vbcpool, vc);
-}
-
#define FIND_TMO(tmx, dst, bo, be) \
do { \
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); \
@@ -85,65 +67,6 @@ VBE_ReleaseConn(struct vbc *vc)
dst = cache_param->tmx; \
} while (0)
-/*--------------------------------------------------------------------*/
-
-static void
-bes_conn_try(struct busyobj *bo, struct vbc *vc, const struct vbe_dir *vs)
-{
- int s;
- double tmod;
- struct backend *bp = vs->backend;
- char abuf1[VTCP_ADDRBUFSIZE];
- char pbuf1[VTCP_PORTBUFSIZE];
-
- CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
- CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
-
- Lck_Lock(&bp->mtx);
- bp->refcount++;
- bp->n_conn++; /* It mostly works */
- bp->vsc->conn++;
- Lck_Unlock(&bp->mtx);
-
- assert(bp->ipv6 != NULL || bp->ipv4 != NULL);
-
- /* release lock during stuff that can take a long time */
-
- FIND_TMO(connect_timeout, tmod, bo, vs->vrt);
- s = VBT_Open(bp->tcp_pool, tmod, &vc->addr);
-
- vc->fd = s;
- if (s < 0) {
- Lck_Lock(&bp->mtx);
- bp->n_conn--;
- bp->vsc->conn--;
- bp->refcount--; /* Only keep ref on success */
- Lck_Unlock(&bp->mtx);
- vc->addr = NULL;
- } else {
- VTCP_myname(s, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
- VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s ",
- vc->fd, vs->backend->display_name, abuf1, pbuf1);
- }
-}
-
-/*--------------------------------------------------------------------
- * Check that there is still something at the far end of a given socket.
- * We poll the fd with instant timeout, if there are any events we can't
- * use it (backends are not allowed to pipeline).
- */
-
-static int
-vbe_CheckFd(int fd)
-{
- struct pollfd pfd;
-
- pfd.fd = fd;
- pfd.events = POLLIN;
- pfd.revents = 0;
- return(poll(&pfd, 1, 0) == 0);
-}
-
/*--------------------------------------------------------------------
* Test if backend is healthy and report when it last changed
*/
@@ -166,80 +89,6 @@ VBE_Healthy(const struct backend *backend, double *changed)
}
/*--------------------------------------------------------------------
- * Get a connection to a particular backend.
- */
-
-static struct vbc *
-vbe_GetVbe(struct busyobj *bo, struct vbe_dir *vs)
-{
- struct vbc *vc;
- struct backend *bp;
-
- CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
- CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
- bp = vs->backend;
- CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
-
- if (!VBE_Healthy(bp, NULL)) {
- VSC_C_main->backend_unhealthy++;
- return (NULL);
- }
-
- /* first look for vbc's we can recycle */
- while (1) {
- vc = VBT_Get(bp->tcp_pool);
- if (vc == NULL)
- break;
-
- Lck_Lock(&bp->mtx);
- bp->refcount++;
- assert(vc->backend == bp);
- assert(vc->fd >= 0);
- AN(vc->addr);
- Lck_Unlock(&bp->mtx);
-
- if (vbe_CheckFd(vc->fd)) {
- VSLb(bo->vsl, SLT_Backend, "%d %s %s",
- vc->fd, bo->director_resp->vcl_name,
- bp->display_name);
- vc->vdis = vs;
- vc->recycled = 1;
- return (vc);
- }
- VSLb(bo->vsl, SLT_BackendClose, "%d %s toolate",
- vc->fd, bp->display_name);
-
- VTCP_close(&vc->fd);
- VBE_DropRefConn(bp, NULL);
- vc->backend = NULL;
- VBE_ReleaseConn(vc);
- }
-
- if (vs->vrt->max_connections > 0 &&
- bp->n_conn >= vs->vrt->max_connections) {
- VSC_C_main->backend_busy++;
- return (NULL);
- }
-
- vc = MPL_Get(vbcpool, NULL);
- XXXAN(vc);
- vc->magic = VBC_MAGIC;
- vc->fd = -1;
- bes_conn_try(bo, vc, vs);
- if (vc->fd < 0) {
- VBE_ReleaseConn(vc);
- VSC_C_main->backend_fail++;
- return (NULL);
- }
- vc->backend = bp;
- VSC_C_main->backend_conn++;
- VSLb(bo->vsl, SLT_Backend, "%d %s %s",
- vc->fd, bo->director_resp->vcl_name, bp->display_name);
- vc->vdis = vs;
- return (vc);
-}
-
-/*--------------------------------------------------------------------
*
*/
@@ -278,7 +127,7 @@ VBE_DiscardHealth(const struct director *vdi)
}
/*--------------------------------------------------------------------
- *
+ * Get a connection to the backend
*/
static int __match_proto__(vdi_getfd_f)
@@ -286,17 +135,47 @@ vbe_dir_getfd(const struct director *d, struct busyobj *bo)
{
struct vbe_dir *vs;
struct vbc *vc;
+ struct backend *bp;
+ double tmod;
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CAST_OBJ_NOTNULL(vs, d->priv, VDI_SIMPLE_MAGIC);
+ bp = vs->backend;
+ CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
- vc = vbe_GetVbe(bo, vs);
+ if (!VBE_Healthy(bp, NULL)) {
+ // XXX: per backend stats ?
+ VSC_C_main->backend_unhealthy++;
+ return (-1);
+ }
+
+ if (vs->vrt->max_connections > 0 &&
+ bp->n_conn >= vs->vrt->max_connections) {
+ // XXX: per backend stats ?
+ VSC_C_main->backend_busy++;
+ return (-1);
+ }
+
+ FIND_TMO(connect_timeout, tmod, bo, vs->vrt);
+ vc = VBT_Get(bp->tcp_pool, tmod);
if (vc == NULL) {
+ // XXX: Per backend stats ?
+ VSC_C_main->backend_fail++;
VSLb(bo->vsl, SLT_FetchError, "no backend connection");
return (-1);
}
+ assert(vc->fd >= 0);
+ vc->backend = bp;
+ AN(vc->addr);
+
+ Lck_Lock(&bp->mtx);
+ bp->refcount++;
+ bp->n_conn++;
+ bp->vsc->conn++;
+ Lck_Unlock(&bp->mtx);
+
vc->backend->vsc->req++;
if (bo->htc == NULL)
bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
@@ -343,10 +222,8 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
if (bo->doclose != SC_NULL) {
VSLb(bo->vsl, SLT_BackendClose, "%d %s", bo->htc->vbc->fd,
bp->display_name);
- VTCP_close(&bo->htc->vbc->fd);
+ VBT_Close(bp->tcp_pool, &bo->htc->vbc);
VBE_DropRefConn(bp, &bo->acct);
- bo->htc->vbc->backend = NULL;
- VBE_ReleaseConn(bo->htc->vbc);
} else {
VSLb(bo->vsl, SLT_BackendReuse, "%d %s", bo->htc->vbc->fd,
bp->display_name);
@@ -482,11 +359,3 @@ VRT_init_vbe(VRT_CTX, struct director **bp, int idx,
bp[idx] = &vs->dir;
}
-
-void
-VBE_Init(void)
-{
-
- vbcpool = MPL_New("vbc", &cache_param->vbc_pool, &vbcps);
- AN(vbcpool);
-}
diff --git a/bin/varnishd/cache/cache_backend.h b/bin/varnishd/cache/cache_backend.h
index 0cda35e..63806ec 100644
--- a/bin/varnishd/cache/cache_backend.h
+++ b/bin/varnishd/cache/cache_backend.h
@@ -89,17 +89,14 @@ struct vbc {
unsigned magic;
#define VBC_MAGIC 0x0c5e6592
VTAILQ_ENTRY(vbc) list;
- struct backend *backend;
- struct vbe_dir *vdis;
int fd;
-
const struct suckaddr *addr;
-
uint8_t recycled;
+
+ struct backend *backend;
};
/* cache_backend.c */
-void VBE_ReleaseConn(struct vbc *vc);
void VBE_UseHealth(const struct director *vdi);
void VBE_DiscardHealth(const struct director *vdi);
@@ -127,6 +124,7 @@ struct tcp_pool *VBT_Ref(const char *name, const struct suckaddr *ip4,
void VBT_Rel(struct tcp_pool **tpp);
int VBT_Open(struct tcp_pool *tp, double tmo, const struct suckaddr **sa);
void VBT_Recycle(struct tcp_pool *tp, struct vbc **vbc);
-struct vbc *VBT_Get(struct tcp_pool *tp);
+void VBT_Close(struct tcp_pool *tp, struct vbc **vbc);
+struct vbc *VBT_Get(struct tcp_pool *tp, double tmo);
diff --git a/bin/varnishd/cache/cache_backend_tcp.c b/bin/varnishd/cache/cache_backend_tcp.c
index 5495cff..90f18fe 100644
--- a/bin/varnishd/cache/cache_backend_tcp.c
+++ b/bin/varnishd/cache/cache_backend_tcp.c
@@ -25,7 +25,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * TCP connection pools for backends
+ * TCP connection pools.
+ *
+ * These are really a lot more general than just backends, but backends
+ * are all we use them for, so they live here for now.
*
*/
@@ -57,7 +60,12 @@ struct tcp_pool {
struct lock mtx;
VTAILQ_HEAD(, vbc) connlist;
+ int n_conn;
+
VTAILQ_HEAD(, vbc) killlist;
+ int n_kill;
+
+ int n_used;
};
@@ -133,6 +141,7 @@ VBT_Rel(struct tcp_pool **tpp)
assert(tp->refcnt > 0);
if (--tp->refcnt > 0)
return;
+ AZ(tp->n_used);
VTAILQ_REMOVE(&pools, tp, list);
free(tp->name);
free(tp->ip4);
@@ -140,18 +149,18 @@ VBT_Rel(struct tcp_pool **tpp)
Lck_Delete(&tp->mtx);
VTAILQ_FOREACH_SAFE(vbc, &tp->connlist, list, vbc2) {
VTAILQ_REMOVE(&tp->connlist, vbc, list);
- vbc->backend = NULL;
- (void)close(vbc->fd);
- vbc->fd = -1;
- VBE_ReleaseConn(vbc);
+ tp->n_conn--;
+ VTCP_close(&vbc->fd);
+ FREE_OBJ(vbc);
}
VTAILQ_FOREACH_SAFE(vbc, &tp->killlist, list, vbc2) {
VTAILQ_REMOVE(&tp->killlist, vbc, list);
- vbc->backend = NULL;
- (void)close(vbc->fd);
- vbc->fd = -1;
- VBE_ReleaseConn(vbc);
+ tp->n_kill--;
+ VTCP_close(&vbc->fd);
+ FREE_OBJ(vbc);
}
+ AZ(tp->n_conn);
+ AZ(tp->n_kill);
FREE_OBJ(tp);
}
@@ -189,16 +198,42 @@ VBT_Open(struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
*/
void
-VBT_Recycle(struct tcp_pool *tp, struct vbc **vbc)
+VBT_Recycle(struct tcp_pool *tp, struct vbc **vbcp)
+{
+ struct vbc *vbc;
+
+ CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
+ vbc = *vbcp;
+ *vbcp = NULL;
+ CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
+
+ Lck_Lock(&tp->mtx);
+ vbc->recycled = 1;
+ VTAILQ_INSERT_HEAD(&tp->connlist, vbc, list);
+ tp->n_conn++;
+ tp->n_used--;
+ Lck_Unlock(&tp->mtx);
+}
+
+/*--------------------------------------------------------------------
+ * Close a connection.
+ */
+
+void
+VBT_Close(struct tcp_pool *tp, struct vbc **vbcp)
{
+ struct vbc *vbc;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
- CHECK_OBJ_NOTNULL((*vbc), VBC_MAGIC);
+ vbc = *vbcp;
+ *vbcp = NULL;
+ CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
+ VTCP_close(&vbc->fd);
+ FREE_OBJ(vbc);
Lck_Lock(&tp->mtx);
- VTAILQ_INSERT_HEAD(&tp->connlist, *vbc, list);
+ tp->n_used--;
Lck_Unlock(&tp->mtx);
- *vbc = NULL;
}
/*--------------------------------------------------------------------
@@ -206,11 +241,12 @@ VBT_Recycle(struct tcp_pool *tp, struct vbc **vbc)
*/
struct vbc *
-VBT_Get(struct tcp_pool *tp)
+VBT_Get(struct tcp_pool *tp, double tmo)
{
struct vbc *vbc;
struct pollfd pfd;
+ (void)tmo;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
Lck_Lock(&tp->mtx);
@@ -229,23 +265,40 @@ VBT_Get(struct tcp_pool *tp)
VSC_C_main->backend_toolate++;
do {
VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ tp->n_conn--;
#if 0
VTAILQ_INSERT_TAIL(&tp->killlist, vbc, list);
+ tp->n_kill++;
#else
- vbc->backend = NULL;
- (void)close(vbc->fd);
- vbc->fd = -1;
- VBE_ReleaseConn(vbc);
+ VTCP_close(&vbc->fd);
+ FREE_OBJ(vbc);
#endif
vbc = VTAILQ_FIRST(&tp->connlist);
} while (vbc != NULL);
} else {
VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ tp->n_conn--;
+ tp->n_used++;
VSC_C_main->backend_reuse += 1;
}
}
+ if (vbc == NULL)
+ tp->n_used++; // Opening mostly works
Lck_Unlock(&tp->mtx);
+
if (vbc != NULL)
return (vbc);
- return (NULL);
+
+ ALLOC_OBJ(vbc, VBC_MAGIC);
+ if (vbc != NULL) {
+ vbc->fd = VBT_Open(tp, tmo, &vbc->addr);
+ if (vbc->fd < 0)
+ FREE_OBJ(vbc);
+ }
+ if (vbc == NULL) {
+ Lck_Lock(&tp->mtx);
+ tp->n_used--;
+ Lck_Unlock(&tp->mtx);
+ }
+ return (vbc);
}
diff --git a/bin/varnishd/cache/cache_director.h b/bin/varnishd/cache/cache_director.h
index d204ef0..49611ab 100644
--- a/bin/varnishd/cache/cache_director.h
+++ b/bin/varnishd/cache/cache_director.h
@@ -82,6 +82,3 @@ void VDI_Finish(struct worker *wrk, struct busyobj *bo);
int VDI_Http1Pipe(struct req *, struct busyobj *);
int VDI_Healthy(const struct director *, const struct busyobj *);
-
-void VBE_Init(void);
-
diff --git a/bin/varnishd/cache/cache_main.c b/bin/varnishd/cache/cache_main.c
index bc55e99..469134d 100644
--- a/bin/varnishd/cache/cache_main.c
+++ b/bin/varnishd/cache/cache_main.c
@@ -221,7 +221,6 @@ child_main(void)
HTTP_Init();
- VBE_Init();
VBO_Init();
VBE_InitCfg();
VBP_Init();
diff --git a/bin/varnishd/common/params.h b/bin/varnishd/common/params.h
index 2ce95c6..cdb8646 100644
--- a/bin/varnishd/common/params.h
+++ b/bin/varnishd/common/params.h
@@ -211,7 +211,6 @@ struct params {
ssize_t vsm_space;
ssize_t vsl_space;
- struct poolparam vbc_pool;
struct poolparam req_pool;
struct poolparam sess_pool;
struct poolparam vbo_pool;
diff --git a/bin/varnishd/mgt/mgt_param_tbl.c b/bin/varnishd/mgt/mgt_param_tbl.c
index 50ba8ea..fc9dc78 100644
--- a/bin/varnishd/mgt/mgt_param_tbl.c
+++ b/bin/varnishd/mgt/mgt_param_tbl.c
@@ -615,13 +615,6 @@ struct parspec mgt_parspec[] = {
0,
"off", "bool"},
- { "pool_vbc", tweak_poolparam, &mgt_param.vbc_pool,
- NULL, NULL,
- "Parameters for backend connection memory pool.\n"
- MEMPOOL_TEXT,
- 0,
- "10,100,10", ""},
-
{ "pool_req", tweak_poolparam, &mgt_param.req_pool,
NULL, NULL,
"Parameters for per worker pool request memory pool.\n"
diff --git a/bin/varnishtest/tests/b00034.vtc b/bin/varnishtest/tests/b00034.vtc
index 8e23d60..e3ba26b 100644
--- a/bin/varnishtest/tests/b00034.vtc
+++ b/bin/varnishtest/tests/b00034.vtc
@@ -5,9 +5,9 @@ server s1 {
varnish v1 -vcl+backend {}
-varnish v1 -cliok "param.set pool_vbc 1,10,1"
-varnish v1 -clierr 106 "param.set pool_vbc 10"
-varnish v1 -clierr 106 "param.set pool_vbc 10,1,1"
-varnish v1 -clierr 106 "param.set pool_vbc a,10,10"
-varnish v1 -clierr 106 "param.set pool_vbc 10,a,10"
-varnish v1 -clierr 106 "param.set pool_vbc 10,10,a"
+varnish v1 -cliok "param.set pool_req 1,10,1"
+varnish v1 -clierr 106 "param.set pool_req 10"
+varnish v1 -clierr 106 "param.set pool_req 10,1,1"
+varnish v1 -clierr 106 "param.set pool_req a,10,10"
+varnish v1 -clierr 106 "param.set pool_req 10,a,10"
+varnish v1 -clierr 106 "param.set pool_req 10,10,a"
diff --git a/bin/varnishtest/tests/c00050.vtc b/bin/varnishtest/tests/c00050.vtc
index 2e787cb..6a3b35d 100644
--- a/bin/varnishtest/tests/c00050.vtc
+++ b/bin/varnishtest/tests/c00050.vtc
@@ -6,24 +6,24 @@ varnish v1 -vcl+backend {} -start
delay 2
-varnish v1 -expect MEMPOOL.vbc.pool == 10
+varnish v1 -expect MEMPOOL.req0.pool == 10
-varnish v1 -cliok "param.set pool_vbc 90,100,100"
+varnish v1 -cliok "param.set pool_req 90,100,100"
delay 2
-varnish v1 -expect MEMPOOL.vbc.pool == 90
+varnish v1 -expect MEMPOOL.req0.pool == 90
-varnish v1 -cliok "param.set pool_vbc 50,80,100"
+varnish v1 -cliok "param.set pool_req 50,80,100"
delay 2
-varnish v1 -expect MEMPOOL.vbc.pool == 80
-varnish v1 -expect MEMPOOL.vbc.surplus == 10
+varnish v1 -expect MEMPOOL.req0.pool == 80
+varnish v1 -expect MEMPOOL.req0.surplus == 10
-varnish v1 -cliok "param.set pool_vbc 10,80,1"
+varnish v1 -cliok "param.set pool_req 10,80,1"
delay 2
-varnish v1 -expect MEMPOOL.vbc.pool == 10
-varnish v1 -expect MEMPOOL.vbc.timeout == 70
+varnish v1 -expect MEMPOOL.req0.pool == 10
+varnish v1 -expect MEMPOOL.req0.timeout == 70