[master] 3c0b8768a The backend probes can only access the backend under lock, because the backend might be going away, and since we cannot afford to hold the lock over VTP_Open(), we have to pull the VBE_vsc knowledge one level back up.
Poul-Henning Kamp
phk at FreeBSD.org
Thu Jun 14 08:39:07 UTC 2018
commit 3c0b8768afda0cc657fb902d1d390bdd0e8b7ce5
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Thu Jun 14 08:37:05 2018 +0000
The backend probes can only access the backend under lock, because
the backend might be going away, and since we cannot afford to hold
the lock over VTP_Open(), we have to pull the VBE_vsc knowledge
one level back up.
Overlooked by: slink, phk
Spotted by: Coverity
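In other words, the error accounting moves out of the connection pool and up to the callers: VCP_Open() now reports the errno through an out parameter, and the probe code takes vbp_mtx before touching the backend's counters. A minimal sketch of the resulting probe-side pattern (names taken from the diff below; the surrounding probe state is elided):

    int err;
    int s;

    /* Open without holding any lock; err receives the errno. */
    s = VTP_Open(vt->tcp_pool, t_end - t_now, (const void **)&sa, &err);
    if (s < 0) {
            /* Only touch the backend under lock: it may be going away. */
            Lck_Lock(&vbp_mtx);
            if (vt->backend != NULL)
                    VBE_Connect_Error(vt->backend->vsc, err);
            Lck_Unlock(&vbp_mtx);
    }
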
diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index a6e176049..4f542f548 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -58,6 +58,41 @@ static struct lock backends_mtx;
/*--------------------------------------------------------------------*/
+void
+VBE_Connect_Error(struct VSC_vbe *vsc, int err)
+{
+
+ switch(err) {
+ case 0:
+ /*
+ * This is kind of brittle, but zero is the only
+ * value of errno we can trust to have no meaning.
+ */
+ vsc->helddown++;
+ break;
+ case EACCES:
+ case EPERM:
+ vsc->fail_eacces++;
+ break;
+ case EADDRNOTAVAIL:
+ vsc->fail_eaddrnotavail++;
+ break;
+ case ECONNREFUSED:
+ vsc->fail_econnrefused++;
+ break;
+ case ENETUNREACH:
+ vsc->fail_enetunreach++;
+ break;
+ case ETIMEDOUT:
+ vsc->fail_etimedout++;
+ break;
+ default:
+ vsc->fail_other++;
+ }
+}
+
+/*--------------------------------------------------------------------*/
+
#define FIND_TMO(tmx, dst, bo, be) \
do { \
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); \
@@ -77,7 +112,7 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
unsigned force_fresh)
{
struct pfd *pfd;
- int *fdp;
+ int *fdp, err;
double tmod;
char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
@@ -113,11 +148,12 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
bo->htc->doclose = SC_NULL;
FIND_TMO(connect_timeout, tmod, bo, bp);
- pfd = VTP_Get(bp->tcp_pool, tmod, wrk, force_fresh, bp->vsc);
+ pfd = VTP_Get(bp->tcp_pool, tmod, wrk, force_fresh, &err);
if (pfd == NULL) {
+ VBE_Connect_Error(bp->vsc, err);
VSLb(bo->vsl, SLT_FetchError,
"backend %s: fail errno %d (%s)",
- VRT_BACKEND_string(bp->director), errno, strerror(errno));
+ VRT_BACKEND_string(bp->director), err, strerror(err));
VSC_C_main->backend_fail++;
bo->htc = NULL;
return (NULL);
diff --git a/bin/varnishd/cache/cache_backend.h b/bin/varnishd/cache/cache_backend.h
index 940e2426d..706fea428 100644
--- a/bin/varnishd/cache/cache_backend.h
+++ b/bin/varnishd/cache/cache_backend.h
@@ -82,3 +82,5 @@ void VBP_Insert(struct backend *b, struct vrt_backend_probe const *p,
void VBP_Remove(struct backend *b);
void VBP_Control(const struct backend *b, int stop);
void VBP_Status(struct vsb *, const struct backend *, int details, int json);
+void VBE_Connect_Error(struct VSC_vbe *, int err);
+
diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c
index dfa594ce8..a5e67c021 100644
--- a/bin/varnishd/cache/cache_backend_probe.c
+++ b/bin/varnishd/cache/cache_backend_probe.c
@@ -271,7 +271,7 @@ vbp_write_proxy_v1(struct vbp_target *vt, int *sock)
static void
vbp_poke(struct vbp_target *vt)
{
- int s, tmo, i, proxy_header;
+ int s, tmo, i, proxy_header, err;
double t_start, t_now, t_end;
unsigned rlen, resp;
char buf[8192], *p;
@@ -281,11 +281,13 @@ vbp_poke(struct vbp_target *vt)
t_start = t_now = VTIM_real();
t_end = t_start + vt->timeout;
- s = VTP_Open(vt->tcp_pool, t_end - t_now, (const void **)&sa,
- vt->backend->vsc);
+ s = VTP_Open(vt->tcp_pool, t_end - t_now, (const void **)&sa, &err);
if (s < 0) {
- bprintf(vt->resp_buf, "Open error %d (%s)",
- errno, strerror(errno));
+ bprintf(vt->resp_buf, "Open error %d (%s)", err, strerror(err));
+ Lck_Lock(&vbp_mtx);
+ if (vt->backend)
+ VBE_Connect_Error(vt->backend->vsc, err);
+ Lck_Unlock(&vbp_mtx);
return;
}
diff --git a/bin/varnishd/cache/cache_tcp_pool.c b/bin/varnishd/cache/cache_tcp_pool.c
index d79273586..f33668b2f 100644
--- a/bin/varnishd/cache/cache_tcp_pool.c
+++ b/bin/varnishd/cache/cache_tcp_pool.c
@@ -45,8 +45,6 @@
#include "cache_tcp_pool.h"
#include "cache_pool.h"
-#include "VSC_vbe.h"
-
struct conn_pool;
/*--------------------------------------------------------------------
@@ -379,13 +377,13 @@ VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
*/
static int
-VCP_Open(struct conn_pool *cp, double tmo, const void **privp,
- struct VSC_vbe *vsc)
+VCP_Open(struct conn_pool *cp, double tmo, const void **privp, int *err)
{
int r;
double h;
CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
+ AN(err);
while (cp->holddown > 0) {
Lck_Lock(&cp->mtx);
@@ -400,8 +398,7 @@ VCP_Open(struct conn_pool *cp, double tmo, const void **privp,
break;
}
- if (vsc)
- vsc->helddown++;
+ *err = 0;
errno = cp->holddown_errno;
Lck_Unlock(&cp->mtx);
return (-1);
@@ -409,7 +406,9 @@ VCP_Open(struct conn_pool *cp, double tmo, const void **privp,
r = cp->methods->open(cp, tmo, privp);
- if (r >= 0 || vsc == NULL)
+ *err = errno;
+
+ if (r >= 0)
return (r);
h = 0;
@@ -418,28 +417,20 @@ VCP_Open(struct conn_pool *cp, double tmo, const void **privp,
switch (errno) {
case EACCES:
case EPERM:
- vsc->fail_eacces++;
h = cache_param->backend_local_error_holddown;
break;
case EADDRNOTAVAIL:
- vsc->fail_eaddrnotavail++;
h = cache_param->backend_local_error_holddown;
break;
case ECONNREFUSED:
- vsc->fail_econnrefused++;
h = cache_param->backend_remote_error_holddown;
break;
case ENETUNREACH:
- vsc->fail_enetunreach++;
h = cache_param->backend_remote_error_holddown;
break;
- case ETIMEDOUT:
- vsc->fail_etimedout++;
- break;
default:
- vsc->fail_other++;
+ break;
}
- vsc->fail++;
if (h == 0)
return (r);
@@ -498,13 +489,15 @@ VCP_Close(struct pfd **pfdp)
static struct pfd *
VCP_Get(struct conn_pool *cp, double tmo, struct worker *wrk,
- unsigned force_fresh, struct VSC_vbe *vsc)
+ unsigned force_fresh, int *err)
{
struct pfd *pfd;
CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+ AN(err);
+ *err = 0;
Lck_Lock(&cp->mtx);
pfd = VTAILQ_FIRST(&cp->connlist);
CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
@@ -531,7 +524,7 @@ VCP_Get(struct conn_pool *cp, double tmo, struct worker *wrk,
INIT_OBJ(pfd->waited, WAITED_MAGIC);
pfd->state = PFD_STATE_USED;
pfd->conn_pool = cp;
- pfd->fd = VCP_Open(cp, tmo, &pfd->priv, vsc);
+ pfd->fd = VCP_Open(cp, tmo, &pfd->priv, err);
if (pfd->fd < 0) {
FREE_OBJ(pfd);
Lck_Lock(&cp->mtx);
@@ -809,10 +802,9 @@ VTP_Rel(struct tcp_pool **tpp)
*/
int
-VTP_Open(struct tcp_pool *tp, double tmo, const void **privp,
- struct VSC_vbe *vsc)
+VTP_Open(struct tcp_pool *tp, double tmo, const void **privp, int *err)
{
- return (VCP_Open(tp->cp, tmo, privp, vsc));
+ return (VCP_Open(tp->cp, tmo, privp, err));
}
/*--------------------------------------------------------------------
@@ -843,10 +835,10 @@ VTP_Close(struct pfd **pfdp)
struct pfd *
VTP_Get(struct tcp_pool *tp, double tmo, struct worker *wrk,
- unsigned force_fresh, struct VSC_vbe *vsc)
+ unsigned force_fresh, int *err)
{
- return VCP_Get(tp->cp, tmo, wrk, force_fresh, vsc);
+ return VCP_Get(tp->cp, tmo, wrk, force_fresh, err);
}
/*--------------------------------------------------------------------
diff --git a/bin/varnishd/cache/cache_tcp_pool.h b/bin/varnishd/cache/cache_tcp_pool.h
index 9f640da73..7c25afda2 100644
--- a/bin/varnishd/cache/cache_tcp_pool.h
+++ b/bin/varnishd/cache/cache_tcp_pool.h
@@ -72,11 +72,10 @@ void VTP_Rel(struct tcp_pool **);
* the pool is destroyed and all cached connections closed.
*/
-int VTP_Open(struct tcp_pool *, double tmo, const void **,
- struct VSC_vbe *);
+int VTP_Open(struct tcp_pool *, double tmo, const void **, int*);
/*
* Open a new connection and return the address used.
- * Errors will be accounted in the optional vsc
+ * errno will be returned in the last argument.
*/
void VTP_Close(struct pfd **);
@@ -90,10 +89,10 @@ void VTP_Recycle(const struct worker *, struct pfd **);
*/
struct pfd *VTP_Get(struct tcp_pool *, double tmo, struct worker *,
- unsigned force_fresh, struct VSC_vbe *);
+ unsigned force_fresh, int *err);
/*
* Get a (possibly) recycled connection.
- * Open errors will be accounted in the optional vsc
+ * errno will be stored in err
*/
int VTP_Wait(struct worker *, struct pfd *, double tmo);
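For reference, a minimal usage sketch of the revised interfaces (names from the diff above; a zero err means the pool was held down rather than a fresh connect failure):

    int err = 0;
    struct pfd *pfd;

    pfd = VTP_Get(tp, tmo, wrk, 0 /* force_fresh */, &err);
    if (pfd == NULL)
            VBE_Connect_Error(vsc, err);    /* caller owns the accounting */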