[master] 56f4586 Rename tcp_pools to VTP
Poul-Henning Kamp
phk at FreeBSD.org
Thu Oct 5 08:59:10 UTC 2017
commit 56f45866614bc99cd6a287a82e6029a351c9d088
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Thu Oct 5 08:45:36 2017 +0000
Rename tcp_pools to VTP
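
In short, the former per-connection struct vbc becomes struct vtp, and the VBT_* TCP-pool functions become VTP_*; struct tcp_pool itself keeps its name. As a quick orientation before the full diff, here is a condensed, hedged sketch of how a backend fetch drives the renamed API, boiled down from vbe_dir_getfd() and vbe_dir_finish() below. The function name fetch_sketch is made up for illustration, it assumes the usual Varnish-internal declarations (struct worker, struct backend, struct busyobj), and it drops locking, logging and stats:

	/* Sketch only: connection life cycle of a backend fetch with the new
	 * names; old names in the trailing comments. Condensed from
	 * vbe_dir_getfd() and vbe_dir_finish() in the diff below. */
	static void
	fetch_sketch(struct worker *wrk, struct backend *bp, struct busyobj *bo,
	    double tmod)
	{
		struct vtp *vtp;                          /* was struct vbc */

		vtp = VTP_Get(bp->tcp_pool, tmod, wrk);   /* was VBT_Get() */
		if (vtp == NULL)
			return;                           /* no backend connection */
		assert(vtp->fd >= 0);                     /* fields keep their names */

		/* ... V1F_SendReq(), V1F_FetchRespHdr() ... */

		if (vtp->state != VTP_STATE_USED)         /* was VBC_STATE_USED */
			VTP_Wait(wrk, vtp);               /* was VBT_Wait() */

		if (bo->htc->doclose != SC_NULL)
			VTP_Close(bp->tcp_pool, &vtp);    /* was VBT_Close() */
		else
			VTP_Recycle(wrk, bp->tcp_pool, &vtp); /* was VBT_Recycle() */
	}
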
diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index 6473be0..a642304 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -63,10 +63,10 @@
* Get a connection to the backend
*/
-static struct vbc *
+static struct vtp *
vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
{
- struct vbc *vbc;
+ struct vtp *vtp;
double tmod;
char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
@@ -96,16 +96,16 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
bo->htc->doclose = SC_NULL;
FIND_TMO(connect_timeout, tmod, bo, bp);
- vbc = VBT_Get(bp->tcp_pool, tmod, wrk);
- if (vbc == NULL) {
+ vtp = VTP_Get(bp->tcp_pool, tmod, wrk);
+ if (vtp == NULL) {
// XXX: Per backend stats ?
VSC_C_main->backend_fail++;
bo->htc = NULL;
return (NULL);
}
- assert(vbc->fd >= 0);
- AN(vbc->addr);
+ assert(vtp->fd >= 0);
+ AN(vtp->addr);
Lck_Lock(&bp->mtx);
bp->n_conn++;
@@ -114,21 +114,21 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
Lck_Unlock(&bp->mtx);
if (bp->proxy_header != 0)
- VPX_Send_Proxy(vbc->fd, bp->proxy_header, bo->sp);
+ VPX_Send_Proxy(vtp->fd, bp->proxy_header, bo->sp);
- VTCP_myname(vbc->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
- VTCP_hisname(vbc->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
+ VTCP_myname(vtp->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
+ VTCP_hisname(vtp->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s",
- vbc->fd, bp->display_name, abuf2, pbuf2, abuf1, pbuf1);
+ vtp->fd, bp->display_name, abuf2, pbuf2, abuf1, pbuf1);
INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
- bo->htc->priv = vbc;
- bo->htc->rfd = &vbc->fd;
+ bo->htc->priv = vtp;
+ bo->htc->rfd = &vtp->fd;
FIND_TMO(first_byte_timeout,
bo->htc->first_byte_timeout, bo, bp);
FIND_TMO(between_bytes_timeout,
bo->htc->between_bytes_timeout, bo, bp);
- return (vbc);
+ return (vtp);
}
static unsigned __match_proto__(vdi_healthy_f)
@@ -148,7 +148,7 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
struct busyobj *bo)
{
struct backend *bp;
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
@@ -156,21 +156,21 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
- CAST_OBJ_NOTNULL(vbc, bo->htc->priv, VBC_MAGIC);
+ CAST_OBJ_NOTNULL(vtp, bo->htc->priv, VTP_MAGIC);
bo->htc->priv = NULL;
- if (vbc->state != VBC_STATE_USED)
- VBT_Wait(wrk, vbc);
+ if (vtp->state != VTP_STATE_USED)
+ VTP_Wait(wrk, vtp);
if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
- VSLb(bo->vsl, SLT_BackendClose, "%d %s", vbc->fd,
+ VSLb(bo->vsl, SLT_BackendClose, "%d %s", vtp->fd,
bp->display_name);
- VBT_Close(bp->tcp_pool, &vbc);
+ VTP_Close(bp->tcp_pool, &vtp);
Lck_Lock(&bp->mtx);
} else {
- VSLb(bo->vsl, SLT_BackendReuse, "%d %s", vbc->fd,
+ VSLb(bo->vsl, SLT_BackendReuse, "%d %s", vtp->fd,
bp->display_name);
Lck_Lock(&bp->mtx);
VSC_C_main->backend_recycle++;
- VBT_Recycle(wrk, bp->tcp_pool, &vbc);
+ VTP_Recycle(wrk, bp->tcp_pool, &vtp);
}
assert(bp->n_conn > 0);
bp->n_conn--;
@@ -187,7 +187,7 @@ vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
{
int i, extrachance = 1;
struct backend *bp;
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
@@ -203,21 +203,21 @@ vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);
do {
- vbc = vbe_dir_getfd(wrk, bp, bo);
- if (vbc == NULL) {
+ vtp = vbe_dir_getfd(wrk, bp, bo);
+ if (vtp == NULL) {
VSLb(bo->vsl, SLT_FetchError, "no backend connection");
return (-1);
}
AN(bo->htc);
- if (vbc->state != VBC_STATE_STOLEN)
+ if (vtp->state != VTP_STATE_STOLEN)
extrachance = 0;
i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes, 0);
- if (vbc->state != VBC_STATE_USED)
- VBT_Wait(wrk, vbc);
+ if (vtp->state != VTP_STATE_USED)
+ VTP_Wait(wrk, vtp);
- assert(vbc->state == VBC_STATE_USED);
+ assert(vtp->state == VTP_STATE_USED);
if (i == 0)
i = V1F_FetchRespHdr(bo);
@@ -248,15 +248,15 @@ static const struct suckaddr * __match_proto__(vdi_getip_f)
vbe_dir_getip(const struct director *d, struct worker *wrk,
struct busyobj *bo)
{
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
- CAST_OBJ_NOTNULL(vbc, bo->htc->priv, VBC_MAGIC);
+ CAST_OBJ_NOTNULL(vtp, bo->htc->priv, VTP_MAGIC);
- return (vbc->addr);
+ return (vtp->addr);
}
/*--------------------------------------------------------------------*/
@@ -268,7 +268,7 @@ vbe_dir_http1pipe(const struct director *d, struct req *req, struct busyobj *bo)
enum sess_close retval;
struct backend *bp;
struct v1p_acct v1a;
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
@@ -283,16 +283,16 @@ vbe_dir_http1pipe(const struct director *d, struct req *req, struct busyobj *bo)
req->res_mode = RES_PIPE;
- vbc = vbe_dir_getfd(req->wrk, bp, bo);
+ vtp = vbe_dir_getfd(req->wrk, bp, bo);
- if (vbc == NULL) {
+ if (vtp == NULL) {
VSLb(bo->vsl, SLT_FetchError, "no backend connection");
retval = SC_TX_ERROR;
} else {
i = V1F_SendReq(req->wrk, bo, &v1a.bereq, 1);
VSLb_ts_req(req, "Pipe", W_TIM_real(req->wrk));
if (i == 0)
- V1P_Process(req, vbc->fd, &v1a);
+ V1P_Process(req, vtp->fd, &v1a);
VSLb_ts_req(req, "PipeSess", W_TIM_real(req->wrk));
bo->htc->doclose = SC_TX_PIPE;
vbe_dir_finish(d, req->wrk, bo);
diff --git a/bin/varnishd/cache/cache_backend_cfg.c b/bin/varnishd/cache/cache_backend_cfg.c
index 6732aca..ca828b9 100644
--- a/bin/varnishd/cache/cache_backend_cfg.c
+++ b/bin/varnishd/cache/cache_backend_cfg.c
@@ -116,9 +116,9 @@ VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt)
Lck_Lock(&backends_mtx);
VTAILQ_INSERT_TAIL(&backends, b, list);
VSC_C_main->n_backend++;
- b->tcp_pool = VBT_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr);
+ b->tcp_pool = VTP_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr);
if (vbp != NULL) {
- tp = VBT_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr);
+ tp = VTP_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr);
assert(b->tcp_pool == tp);
}
Lck_Unlock(&backends_mtx);
@@ -222,7 +222,7 @@ VBE_Delete(struct backend *be)
else
VTAILQ_REMOVE(&backends, be, list);
VSC_C_main->n_backend--;
- VBT_Rel(&be->tcp_pool);
+ VTP_Rel(&be->tcp_pool);
Lck_Unlock(&backends_mtx);
#define DA(x) do { if (be->x != NULL) free(be->x); } while (0)
diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c
index 028fb33..4bf48ba 100644
--- a/bin/varnishd/cache/cache_backend_probe.c
+++ b/bin/varnishd/cache/cache_backend_probe.c
@@ -104,7 +104,7 @@ vbp_delete(struct vbp_target *vt)
#define DN(x) /**/
VRT_BACKEND_PROBE_HANDLE();
#undef DN
- VBT_Rel(&vt->tcp_pool);
+ VTP_Rel(&vt->tcp_pool);
free(vt->req);
FREE_OBJ(vt);
}
@@ -276,7 +276,7 @@ vbp_poke(struct vbp_target *vt)
t_start = t_now = VTIM_real();
t_end = t_start + vt->timeout;
- s = VBT_Open(vt->tcp_pool, t_end - t_now, &sa);
+ s = VTP_Open(vt->tcp_pool, t_end - t_now, &sa);
if (s < 0) {
/* Got no connection: failed */
return;
diff --git a/bin/varnishd/cache/cache_main.c b/bin/varnishd/cache/cache_main.c
index 778790b..5622bf6 100644
--- a/bin/varnishd/cache/cache_main.c
+++ b/bin/varnishd/cache/cache_main.c
@@ -259,7 +259,7 @@ child_main(void)
HTTP_Init();
VBO_Init();
- VBT_Init();
+ VTP_Init();
VBP_Init();
VBE_InitCfg();
Pool_Init();
diff --git a/bin/varnishd/cache/cache_tcp_pool.c b/bin/varnishd/cache/cache_tcp_pool.c
index bd8d9ff..be19ecb 100644
--- a/bin/varnishd/cache/cache_tcp_pool.c
+++ b/bin/varnishd/cache/cache_tcp_pool.c
@@ -56,10 +56,10 @@ struct tcp_pool {
int refcnt;
struct lock mtx;
- VTAILQ_HEAD(, vbc) connlist;
+ VTAILQ_HEAD(, vtp) connlist;
int n_conn;
- VTAILQ_HEAD(, vbc) killlist;
+ VTAILQ_HEAD(, vtp) killlist;
int n_kill;
int n_used;
@@ -76,39 +76,39 @@ static VTAILQ_HEAD(, tcp_pool) pools = VTAILQ_HEAD_INITIALIZER(pools);
static void __match_proto__(waiter_handle_f)
tcp_handle(struct waited *w, enum wait_event ev, double now)
{
- struct vbc *vbc;
+ struct vtp *vtp;
struct tcp_pool *tp;
- CAST_OBJ_NOTNULL(vbc, w->priv1, VBC_MAGIC);
+ CAST_OBJ_NOTNULL(vtp, w->priv1, VTP_MAGIC);
(void)ev;
(void)now;
- CHECK_OBJ_NOTNULL(vbc->tcp_pool, TCP_POOL_MAGIC);
- tp = vbc->tcp_pool;
+ CHECK_OBJ_NOTNULL(vtp->tcp_pool, TCP_POOL_MAGIC);
+ tp = vtp->tcp_pool;
Lck_Lock(&tp->mtx);
- switch (vbc->state) {
- case VBC_STATE_STOLEN:
- vbc->state = VBC_STATE_USED;
- VTAILQ_REMOVE(&tp->connlist, vbc, list);
- AN(vbc->cond);
- AZ(pthread_cond_signal(vbc->cond));
+ switch (vtp->state) {
+ case VTP_STATE_STOLEN:
+ vtp->state = VTP_STATE_USED;
+ VTAILQ_REMOVE(&tp->connlist, vtp, list);
+ AN(vtp->cond);
+ AZ(pthread_cond_signal(vtp->cond));
break;
- case VBC_STATE_AVAIL:
- VTCP_close(&vbc->fd);
- VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ case VTP_STATE_AVAIL:
+ VTCP_close(&vtp->fd);
+ VTAILQ_REMOVE(&tp->connlist, vtp, list);
tp->n_conn--;
- FREE_OBJ(vbc);
+ FREE_OBJ(vtp);
break;
- case VBC_STATE_CLEANUP:
- VTCP_close(&vbc->fd);
+ case VTP_STATE_CLEANUP:
+ VTCP_close(&vtp->fd);
tp->n_kill--;
- VTAILQ_REMOVE(&tp->killlist, vbc, list);
- memset(vbc, 0x11, sizeof *vbc);
- free(vbc);
+ VTAILQ_REMOVE(&tp->killlist, vtp, list);
+ memset(vtp, 0x11, sizeof *vtp);
+ free(vtp);
break;
default:
- WRONG("Wrong vbc state");
+ WRONG("Wrong vtp state");
}
Lck_Unlock(&tp->mtx);
}
@@ -119,7 +119,7 @@ tcp_handle(struct waited *w, enum wait_event ev, double now)
*/
struct tcp_pool *
-VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
+VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
{
struct tcp_pool *tp;
@@ -173,10 +173,10 @@ VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
*/
void
-VBT_Rel(struct tcp_pool **tpp)
+VTP_Rel(struct tcp_pool **tpp)
{
struct tcp_pool *tp;
- struct vbc *vbc, *vbc2;
+ struct vtp *vtp, *vtp2;
TAKE_OBJ_NOTNULL(tp, tpp, TCP_POOL_MAGIC);
@@ -194,13 +194,13 @@ VBT_Rel(struct tcp_pool **tpp)
free(tp->ip4);
free(tp->ip6);
Lck_Lock(&tp->mtx);
- VTAILQ_FOREACH_SAFE(vbc, &tp->connlist, list, vbc2) {
- VTAILQ_REMOVE(&tp->connlist, vbc, list);
+ VTAILQ_FOREACH_SAFE(vtp, &tp->connlist, list, vtp2) {
+ VTAILQ_REMOVE(&tp->connlist, vtp, list);
tp->n_conn--;
- assert(vbc->state == VBC_STATE_AVAIL);
- vbc->state = VBC_STATE_CLEANUP;
- (void)shutdown(vbc->fd, SHUT_WR);
- VTAILQ_INSERT_TAIL(&tp->killlist, vbc, list);
+ assert(vtp->state == VTP_STATE_AVAIL);
+ vtp->state = VTP_STATE_CLEANUP;
+ (void)shutdown(vtp->fd, SHUT_WR);
+ VTAILQ_INSERT_TAIL(&tp->killlist, vtp, list);
tp->n_kill++;
}
while (tp->n_kill) {
@@ -222,7 +222,7 @@ VBT_Rel(struct tcp_pool **tpp)
*/
int
-VBT_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
+VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
{
int s;
int msec;
@@ -250,41 +250,41 @@ VBT_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
*/
void
-VBT_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vbc **vbcp)
+VTP_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vtp **vtpp)
{
- struct vbc *vbc;
+ struct vtp *vtp;
int i = 0;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
- vbc = *vbcp;
- *vbcp = NULL;
- CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
+ vtp = *vtpp;
+ *vtpp = NULL;
+ CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
- assert(vbc->state == VBC_STATE_USED);
- assert(vbc->fd > 0);
+ assert(vtp->state == VTP_STATE_USED);
+ assert(vtp->fd > 0);
Lck_Lock(&tp->mtx);
tp->n_used--;
- vbc->waited->priv1 = vbc;
- vbc->waited->fd = vbc->fd;
- vbc->waited->idle = VTIM_real();
- vbc->state = VBC_STATE_AVAIL;
- vbc->waited->func = tcp_handle;
- vbc->waited->tmo = &cache_param->backend_idle_timeout;
- if (Wait_Enter(wrk->pool->waiter, vbc->waited)) {
- VTCP_close(&vbc->fd);
- memset(vbc, 0x33, sizeof *vbc);
- free(vbc);
+ vtp->waited->priv1 = vtp;
+ vtp->waited->fd = vtp->fd;
+ vtp->waited->idle = VTIM_real();
+ vtp->state = VTP_STATE_AVAIL;
+ vtp->waited->func = tcp_handle;
+ vtp->waited->tmo = &cache_param->backend_idle_timeout;
+ if (Wait_Enter(wrk->pool->waiter, vtp->waited)) {
+ VTCP_close(&vtp->fd);
+ memset(vtp, 0x33, sizeof *vtp);
+ free(vtp);
// XXX: stats
- vbc = NULL;
+ vtp = NULL;
} else {
- VTAILQ_INSERT_HEAD(&tp->connlist, vbc, list);
+ VTAILQ_INSERT_HEAD(&tp->connlist, vtp, list);
i++;
}
- if (vbc != NULL)
+ if (vtp != NULL)
tp->n_conn++;
Lck_Unlock(&tp->mtx);
@@ -293,7 +293,7 @@ VBT_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vbc **vbcp)
* In varnishtest we do not have the luxury of using
* multiple backend connections, so whenever we end up
* in the "pending" case, take a short nap to let the
- * waiter catch up and put the vbc back into circulations.
+ * waiter catch up and put the vtp back into circulations.
*
* In particular ESI:include related tests suffer random
* failures without this.
@@ -311,30 +311,30 @@ VBT_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vbc **vbcp)
*/
void
-VBT_Close(struct tcp_pool *tp, struct vbc **vbcp)
+VTP_Close(struct tcp_pool *tp, struct vtp **vtpp)
{
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
- vbc = *vbcp;
- *vbcp = NULL;
- CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
+ vtp = *vtpp;
+ *vtpp = NULL;
+ CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
- assert(vbc->state == VBC_STATE_USED);
- assert(vbc->fd > 0);
+ assert(vtp->state == VTP_STATE_USED);
+ assert(vtp->fd > 0);
Lck_Lock(&tp->mtx);
tp->n_used--;
- if (vbc->state == VBC_STATE_STOLEN) {
- (void)shutdown(vbc->fd, SHUT_WR);
- vbc->state = VBC_STATE_CLEANUP;
- VTAILQ_INSERT_HEAD(&tp->killlist, vbc, list);
+ if (vtp->state == VTP_STATE_STOLEN) {
+ (void)shutdown(vtp->fd, SHUT_WR);
+ vtp->state = VTP_STATE_CLEANUP;
+ VTAILQ_INSERT_HEAD(&tp->killlist, vtp, list);
tp->n_kill++;
} else {
- assert(vbc->state == VBC_STATE_USED);
- VTCP_close(&vbc->fd);
- memset(vbc, 0x44, sizeof *vbc);
- free(vbc);
+ assert(vtp->state == VTP_STATE_USED);
+ VTCP_close(&vtp->fd);
+ memset(vtp, 0x44, sizeof *vtp);
+ free(vtp);
}
Lck_Unlock(&tp->mtx);
}
@@ -343,77 +343,77 @@ VBT_Close(struct tcp_pool *tp, struct vbc **vbcp)
* Get a connection
*/
-struct vbc *
-VBT_Get(struct tcp_pool *tp, double tmo, struct worker *wrk)
+struct vtp *
+VTP_Get(struct tcp_pool *tp, double tmo, struct worker *wrk)
{
- struct vbc *vbc;
+ struct vtp *vtp;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
Lck_Lock(&tp->mtx);
- vbc = VTAILQ_FIRST(&tp->connlist);
- CHECK_OBJ_ORNULL(vbc, VBC_MAGIC);
- if (vbc == NULL || vbc->state == VBC_STATE_STOLEN)
- vbc = NULL;
+ vtp = VTAILQ_FIRST(&tp->connlist);
+ CHECK_OBJ_ORNULL(vtp, VTP_MAGIC);
+ if (vtp == NULL || vtp->state == VTP_STATE_STOLEN)
+ vtp = NULL;
else {
- assert(vbc->tcp_pool == tp);
- assert(vbc->state == VBC_STATE_AVAIL);
- VTAILQ_REMOVE(&tp->connlist, vbc, list);
- VTAILQ_INSERT_TAIL(&tp->connlist, vbc, list);
+ assert(vtp->tcp_pool == tp);
+ assert(vtp->state == VTP_STATE_AVAIL);
+ VTAILQ_REMOVE(&tp->connlist, vtp, list);
+ VTAILQ_INSERT_TAIL(&tp->connlist, vtp, list);
tp->n_conn--;
VSC_C_main->backend_reuse++;
- vbc->state = VBC_STATE_STOLEN;
- vbc->cond = &wrk->cond;
+ vtp->state = VTP_STATE_STOLEN;
+ vtp->cond = &wrk->cond;
}
tp->n_used++; // Opening mostly works
Lck_Unlock(&tp->mtx);
- if (vbc != NULL)
- return (vbc);
-
- ALLOC_OBJ(vbc, VBC_MAGIC);
- AN(vbc);
- INIT_OBJ(vbc->waited, WAITED_MAGIC);
- vbc->state = VBC_STATE_USED;
- vbc->tcp_pool = tp;
- vbc->fd = VBT_Open(tp, tmo, &vbc->addr);
- if (vbc->fd < 0) {
- FREE_OBJ(vbc);
+ if (vtp != NULL)
+ return (vtp);
+
+ ALLOC_OBJ(vtp, VTP_MAGIC);
+ AN(vtp);
+ INIT_OBJ(vtp->waited, WAITED_MAGIC);
+ vtp->state = VTP_STATE_USED;
+ vtp->tcp_pool = tp;
+ vtp->fd = VTP_Open(tp, tmo, &vtp->addr);
+ if (vtp->fd < 0) {
+ FREE_OBJ(vtp);
Lck_Lock(&tp->mtx);
tp->n_used--; // Nope, didn't work after all.
Lck_Unlock(&tp->mtx);
} else
VSC_C_main->backend_conn++;
- return (vbc);
+ return (vtp);
}
/*--------------------------------------------------------------------
*/
void
-VBT_Wait(struct worker *wrk, struct vbc *vbc)
+VTP_Wait(struct worker *wrk, struct vtp *vtp)
{
struct tcp_pool *tp;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
- CHECK_OBJ_NOTNULL(vbc, VBC_MAGIC);
- tp = vbc->tcp_pool;
+ CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
+ tp = vtp->tcp_pool;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
- assert(vbc->cond == &wrk->cond);
+ assert(vtp->cond == &wrk->cond);
Lck_Lock(&tp->mtx);
- while (vbc->state == VBC_STATE_STOLEN)
+ while (vtp->state == VTP_STATE_STOLEN)
AZ(Lck_CondWait(&wrk->cond, &tp->mtx, 0));
- assert(vbc->state == VBC_STATE_USED);
- vbc->cond = NULL;
+ assert(vtp->state == VTP_STATE_USED);
+ vtp->cond = NULL;
Lck_Unlock(&tp->mtx);
}
/*--------------------------------------------------------------------*/
void
-VBT_Init(void)
+VTP_Init(void)
{
Lck_New(&pools_mtx, lck_backend);
}
diff --git a/bin/varnishd/cache/cache_tcp_pool.h b/bin/varnishd/cache/cache_tcp_pool.h
index 78bfd08..435b10d 100644
--- a/bin/varnishd/cache/cache_tcp_pool.h
+++ b/bin/varnishd/cache/cache_tcp_pool.h
@@ -32,17 +32,17 @@
struct tcp_pool;
-struct vbc {
+struct vtp {
unsigned magic;
-#define VBC_MAGIC 0x0c5e6592
+#define VTP_MAGIC 0x0c5e6592
int fd;
- VTAILQ_ENTRY(vbc) list;
+ VTAILQ_ENTRY(vtp) list;
const struct suckaddr *addr;
uint8_t state;
-#define VBC_STATE_AVAIL (1<<0)
-#define VBC_STATE_USED (1<<1)
-#define VBC_STATE_STOLEN (1<<2)
-#define VBC_STATE_CLEANUP (1<<3)
+#define VTP_STATE_AVAIL (1<<0)
+#define VTP_STATE_USED (1<<1)
+#define VTP_STATE_STOLEN (1<<2)
+#define VTP_STATE_CLEANUP (1<<3)
struct waited waited[1];
struct tcp_pool *tcp_pool;
@@ -54,10 +54,10 @@ struct vbc {
*/
/* cache_tcp_pool.c */
-struct tcp_pool *VBT_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6);
-void VBT_Rel(struct tcp_pool **tpp);
-int VBT_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa);
-void VBT_Recycle(const struct worker *, struct tcp_pool *, struct vbc **);
-void VBT_Close(struct tcp_pool *tp, struct vbc **vbc);
-struct vbc *VBT_Get(struct tcp_pool *, double tmo, struct worker *);
-void VBT_Wait(struct worker *, struct vbc *);
+struct tcp_pool *VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6);
+void VTP_Rel(struct tcp_pool **tpp);
+int VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa);
+void VTP_Recycle(const struct worker *, struct tcp_pool *, struct vtp **);
+void VTP_Close(struct tcp_pool *tp, struct vtp **);
+struct vtp *VTP_Get(struct tcp_pool *, double tmo, struct worker *);
+void VTP_Wait(struct worker *, struct vtp *);
diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h
index 4e67f7b..1dac112 100644
--- a/bin/varnishd/cache/cache_varnishd.h
+++ b/bin/varnishd/cache/cache_varnishd.h
@@ -58,7 +58,7 @@ void VBE_InitCfg(void);
void VBE_Poll(void);
/* cache_backend_tcp.c */
-void VBT_Init(void);
+void VTP_Init(void);
/* cache_backend_poll.c */
void VBP_Init(void);