[master] 7ac1238 More s/sp/req/ work
Poul-Henning Kamp
phk at varnish-cache.org
Tue Jun 19 11:22:58 CEST 2012
commit 7ac1238ef00a50511456c6c7cfb9b7d3b71f7ec5
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Tue Jun 19 09:22:45 2012 +0000
More s/sp/req/ work
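The commit applies one mechanical pattern throughout: functions that only
need per-request state now take a struct req * directly, instead of taking
a struct sess * and immediately dereferencing sp->req. A minimal sketch of
the before/after shape, using simplified stand-in structs rather than the
real declarations from cache.h:

    /* Simplified stand-ins for the structures declared in cache.h. */
    struct req  { unsigned xid; };
    struct sess { struct req *req; };

    /* Before: the callee took the session and dug the request out. */
    unsigned
    get_xid_old(const struct sess *sp)
    {
        return (sp->req->xid);
    }

    /* After: the callee takes the request it actually operates on. */
    unsigned
    get_xid_new(const struct req *req)
    {
        return (req->xid);
    }

Where session-scoped state (the client fd, the worker thread) is still
needed, the diff reaches it through the req->sp back-pointer instead.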
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 845fac6..5823943 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -893,7 +893,7 @@ void MPL_Free(struct mempool *mpl, void *item);
void PAN_Init(void);
/* cache_pipe.c */
-void PipeSession(struct sess *sp);
+void PipeRequest(struct req *req);
/* cache_pool.c */
void Pool_Init(void);
@@ -980,7 +980,7 @@ char *VRT_String(struct ws *ws, const char *h, const char *p, va_list ap);
char *VRT_StringList(char *d, unsigned dl, const char *p, va_list ap);
void ESI_Deliver(struct req *);
-void ESI_DeliverChild(const struct sess *);
+void ESI_DeliverChild(struct req *);
/* cache_vrt_vmod.c */
void VMOD_Init(void);
diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index e640a30..7b3101b 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -87,10 +87,10 @@ VBE_ReleaseConn(struct vbc *vc)
MPL_Free(vbcpool, vc);
}
-#define FIND_TMO(tmx, dst, sp, be) \
+#define FIND_TMO(tmx, dst, req, be) \
do { \
- CHECK_OBJ_NOTNULL(sp->req->busyobj, BUSYOBJ_MAGIC); \
- dst = sp->req->busyobj->tmx; \
+ CHECK_OBJ_NOTNULL(req->busyobj, BUSYOBJ_MAGIC); \
+ dst = req->busyobj->tmx; \
if (dst == 0.0) \
dst = be->tmx; \
if (dst == 0.0) \
@@ -107,20 +107,20 @@ VBE_ReleaseConn(struct vbc *vc)
*/
static int
-vbe_TryConnect(const struct sess *sp, int pf, const struct sockaddr_storage *sa,
+vbe_TryConnect(const struct req *req, int pf, const struct sockaddr_storage *sa,
socklen_t salen, const struct vdi_simple *vs)
{
int s, i, tmo;
double tmod;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
s = socket(pf, SOCK_STREAM, 0);
if (s < 0)
return (s);
- FIND_TMO(connect_timeout, tmod, sp, vs->vrt);
+ FIND_TMO(connect_timeout, tmod, req, vs->vrt);
tmo = (int)(tmod * 1000.0);
@@ -137,7 +137,7 @@ vbe_TryConnect(const struct sess *sp, int pf, const struct sockaddr_storage *sa,
/*--------------------------------------------------------------------*/
static void
-bes_conn_try(const struct sess *sp, struct vbc *vc, const struct vdi_simple *vs)
+bes_conn_try(struct req *req, struct vbc *vc, const struct vdi_simple *vs)
{
int s;
struct backend *bp = vs->backend;
@@ -157,17 +157,17 @@ bes_conn_try(const struct sess *sp, struct vbc *vc, const struct vdi_simple *vs)
/* release lock during stuff that can take a long time */
if (cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
- s = vbe_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, vs);
+ s = vbe_TryConnect(req, PF_INET6, bp->ipv6, bp->ipv6len, vs);
vc->addr = bp->ipv6;
vc->addrlen = bp->ipv6len;
}
if (s == -1 && bp->ipv4 != NULL) {
- s = vbe_TryConnect(sp, PF_INET, bp->ipv4, bp->ipv4len, vs);
+ s = vbe_TryConnect(req, PF_INET, bp->ipv4, bp->ipv4len, vs);
vc->addr = bp->ipv4;
vc->addrlen = bp->ipv4len;
}
if (s == -1 && !cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
- s = vbe_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, vs);
+ s = vbe_TryConnect(req, PF_INET6, bp->ipv6, bp->ipv6len, vs);
vc->addr = bp->ipv6;
vc->addrlen = bp->ipv6len;
}
@@ -183,7 +183,7 @@ bes_conn_try(const struct sess *sp, struct vbc *vc, const struct vdi_simple *vs)
} else {
vc->vsl_id = s | VSL_BACKENDMARKER;
VTCP_myname(s, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
- VSLb(sp->req->vsl, SLT_BackendOpen, "%d %s %s %s ",
+ VSLb(req->vsl, SLT_BackendOpen, "%d %s %s %s ",
vc->fd, vs->backend->display_name, abuf1, pbuf1);
}
@@ -226,7 +226,7 @@ vbe_NewConn(void)
/*--------------------------------------------------------------------
* It evaluates if a backend is healthy _for_a_specific_object_.
- * That means that it relies on sp->req->objcore->objhead. This is mainly for
+ * That means that it relies on req->objcore->objhead. This is mainly for
* saint-mode, but also takes backend->healthy into account. If
* cache_param->saintmode_threshold is 0, this is basically just a test of
* backend->healthy.
@@ -236,7 +236,7 @@ vbe_NewConn(void)
*/
static unsigned int
-vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
+vbe_Healthy(const struct vdi_simple *vs, const struct req *req)
{
struct trouble *tr;
struct trouble *tr2;
@@ -246,7 +246,7 @@ vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
struct backend *backend;
double now;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
backend = vs->backend;
CHECK_OBJ_NOTNULL(backend, BACKEND_MAGIC);
@@ -271,7 +271,7 @@ vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
if (threshold == 0 || VTAILQ_EMPTY(&backend->troublelist))
return (1);
- now = sp->req->t_req;
+ now = req->t_req;
old = NULL;
retval = 1;
@@ -286,7 +286,7 @@ vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
break;
}
- if (!memcmp(tr->digest, sp->req->digest, sizeof tr->digest)) {
+ if (!memcmp(tr->digest, req->digest, sizeof tr->digest)) {
retval = 0;
break;
}
@@ -313,12 +313,12 @@ vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
*/
static struct vbc *
-vbe_GetVbe(const struct sess *sp, struct vdi_simple *vs)
+vbe_GetVbe(struct req *req, struct vdi_simple *vs)
{
struct vbc *vc;
struct backend *bp;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
bp = vs->backend;
CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
@@ -340,20 +340,20 @@ vbe_GetVbe(const struct sess *sp, struct vdi_simple *vs)
if (vbe_CheckFd(vc->fd)) {
/* XXX locking of stats */
VSC_C_main->backend_reuse += 1;
- VSLb(sp->req->vsl, SLT_Backend, "%d %s %s",
- vc->fd, sp->req->director->vcl_name,
+ VSLb(req->vsl, SLT_Backend, "%d %s %s",
+ vc->fd, req->director->vcl_name,
bp->display_name);
vc->vdis = vs;
vc->recycled = 1;
return (vc);
}
VSC_C_main->backend_toolate++;
- VSLb(sp->req->vsl, SLT_BackendClose, "%d %s toolate",
+ VSLb(req->vsl, SLT_BackendClose, "%d %s toolate",
vc->fd, bp->display_name);
/* Checkpoint log to flush all info related to this connection
before the OS reuses the FD */
- VSL_Flush(sp->req->vsl, 0);
+ VSL_Flush(req->vsl, 0);
VTCP_close(&vc->fd);
VBE_DropRefConn(bp);
@@ -361,7 +361,7 @@ vbe_GetVbe(const struct sess *sp, struct vdi_simple *vs)
VBE_ReleaseConn(vc);
}
- if (!vbe_Healthy(vs, sp)) {
+ if (!vbe_Healthy(vs, req)) {
VSC_C_main->backend_unhealthy++;
return (NULL);
}
@@ -375,7 +375,7 @@ vbe_GetVbe(const struct sess *sp, struct vdi_simple *vs)
vc = vbe_NewConn();
assert(vc->fd == -1);
AZ(vc->backend);
- bes_conn_try(sp, vc, vs);
+ bes_conn_try(req, vc, vs);
if (vc->fd < 0) {
VBE_ReleaseConn(vc);
VSC_C_main->backend_fail++;
@@ -383,8 +383,8 @@ vbe_GetVbe(const struct sess *sp, struct vdi_simple *vs)
}
vc->backend = bp;
VSC_C_main->backend_conn++;
- VSLb(sp->req->vsl, SLT_Backend, "%d %s %s",
- vc->fd, sp->req->director->vcl_name, bp->display_name);
+ VSLb(req->vsl, SLT_Backend, "%d %s %s",
+ vc->fd, req->director->vcl_name, bp->display_name);
vc->vdis = vs;
return (vc);
}
@@ -460,12 +460,12 @@ vdi_simple_getfd(const struct director *d, struct req *req)
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CAST_OBJ_NOTNULL(vs, d->priv, VDI_SIMPLE_MAGIC);
- vc = vbe_GetVbe(req->sp, vs);
+ vc = vbe_GetVbe(req, vs);
if (vc != NULL) {
FIND_TMO(first_byte_timeout,
- vc->first_byte_timeout, req->sp, vs->vrt);
+ vc->first_byte_timeout, req, vs->vrt);
FIND_TMO(between_bytes_timeout,
- vc->between_bytes_timeout, req->sp, vs->vrt);
+ vc->between_bytes_timeout, req, vs->vrt);
}
return (vc);
}
@@ -477,7 +477,7 @@ vdi_simple_healthy(const struct director *d, const struct req *req)
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CAST_OBJ_NOTNULL(vs, d->priv, VDI_SIMPLE_MAGIC);
- return (vbe_Healthy(vs, req->sp));
+ return (vbe_Healthy(vs, req));
}
static void
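The FIND_TMO() macro changed above encodes a three-step fallback for
timeouts: a per-request value on the busyobj wins, then the backend's
configured value, with a final default applied below the lines the hunk
shows. A hedged sketch of the same chain as a plain function, with
stand-in struct and field names; the real code is a macro so that one
definition serves every timeout field:

    /* Stand-ins; the real fields live on struct busyobj and the
     * VRT backend definition. */
    struct busyobj     { double connect_timeout; };
    struct vrt_backend { double connect_timeout; };

    /* Request override, else backend setting, else global default. */
    double
    find_connect_tmo(const struct busyobj *bo, const struct vrt_backend *be,
        double param_default)
    {
        double d;

        d = bo->connect_timeout;
        if (d == 0.0)
            d = be->connect_timeout;
        if (d == 0.0)
            d = param_default;
        return (d);
    }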
diff --git a/bin/varnishd/cache/cache_center.c b/bin/varnishd/cache/cache_center.c
index 240d0d0..88c269b 100644
--- a/bin/varnishd/cache/cache_center.c
+++ b/bin/varnishd/cache/cache_center.c
@@ -407,14 +407,14 @@ cnt_done(struct sess *sp, struct worker *wrk, struct req *req)
/* XXX: Add StatReq == StatSess */
/* XXX: Workaround for pipe */
if (sp->fd >= 0) {
- VSLb(sp->req->vsl, SLT_Length, "%ju",
+ VSLb(req->vsl, SLT_Length, "%ju",
(uintmax_t)req->req_bodybytes);
}
- VSLb(sp->req->vsl, SLT_ReqEnd, "%u %.9f %.9f %.9f %.9f %.9f",
+ VSLb(req->vsl, SLT_ReqEnd, "%u %.9f %.9f %.9f %.9f %.9f",
req->xid, req->t_req, sp->t_idle, dh, dp, da);
}
req->xid = 0;
- VSL_Flush(sp->req->vsl, 0);
+ VSL_Flush(req->vsl, 0);
req->t_req = NAN;
req->t_resp = NAN;
@@ -532,7 +532,7 @@ cnt_error(struct sess *sp, struct worker *wrk, struct req *req)
if (req->handling == VCL_RET_RESTART &&
req->restarts < cache_param->max_restarts) {
- HSH_Drop(wrk, &sp->req->obj);
+ HSH_Drop(wrk, &req->obj);
VBO_DerefBusyObj(wrk, &req->busyobj);
sp->step = STP_RESTART;
return (0);
@@ -628,7 +628,7 @@ cnt_fetch(struct sess *sp, struct worker *wrk, struct req *req)
*/
EXP_Clr(&bo->exp);
bo->exp.entered = W_TIM_real(wrk);
- RFC2616_Ttl(bo, sp->req->xid);
+ RFC2616_Ttl(bo, req->xid);
/* pass from vclrecv{} has negative TTL */
if (req->objcore->objhead == NULL)
@@ -1077,7 +1077,7 @@ cnt_lookup(struct sess *sp, struct worker *wrk, struct req *req)
VRY_Prep(req);
AZ(req->objcore);
- oc = HSH_Lookup(sp);
+ oc = HSH_Lookup(req);
if (oc == NULL) {
/*
* We lost the session to a busy object, disembark the
@@ -1131,7 +1131,7 @@ cnt_lookup(struct sess *sp, struct worker *wrk, struct req *req)
if (oc->flags & OC_F_PASS) {
wrk->stats.cache_hitpass++;
- VSLb(sp->req->vsl, SLT_HitPass, "%u", req->obj->xid);
+ VSLb(req->vsl, SLT_HitPass, "%u", req->obj->xid);
(void)HSH_Deref(&wrk->stats, NULL, &req->obj);
AZ(req->objcore);
sp->step = STP_PASS;
@@ -1139,7 +1139,7 @@ cnt_lookup(struct sess *sp, struct worker *wrk, struct req *req)
}
wrk->stats.cache_hit++;
- VSLb(sp->req->vsl, SLT_Hit, "%u", req->obj->xid);
+ VSLb(req->vsl, SLT_Hit, "%u", req->obj->xid);
sp->step = STP_HIT;
return (0);
}
@@ -1323,7 +1323,7 @@ cnt_pipe(struct sess *sp, struct worker *wrk, struct req *req)
INCOMPL();
assert(req->handling == VCL_RET_PIPE);
- PipeSession(sp);
+ PipeRequest(req);
assert(WRW_IsReleased(wrk));
http_Teardown(bo->bereq);
VBO_DerefBusyObj(wrk, &req->busyobj);
@@ -1503,7 +1503,7 @@ cnt_start(struct sess *sp, struct worker *wrk, struct req *req)
/* Assign XID and log */
req->xid = ++xids; /* XXX not locked */
- VSLb(sp->req->vsl, SLT_ReqStart, "%s %s %u",
+ VSLb(req->vsl, SLT_ReqStart, "%s %s %u",
sp->addr, sp->port, req->xid);
/* Borrow VCL reference from worker thread */
diff --git a/bin/varnishd/cache/cache_dir_dns.c b/bin/varnishd/cache/cache_dir_dns.c
index e947f61..d4f6772 100644
--- a/bin/varnishd/cache/cache_dir_dns.c
+++ b/bin/varnishd/cache/cache_dir_dns.c
@@ -135,8 +135,10 @@ vdi_dns_comp_addrinfo(const struct director *dir,
* healthy ones.
*/
static struct director *
-vdi_dns_pick_host(const struct sess *sp, struct vdi_dns_hostgroup *group) {
+vdi_dns_pick_host(const struct req *req, struct vdi_dns_hostgroup *group)
+{
int initial, i, nhosts, current;
+
if (group->nhosts == 0)
return (NULL); // In case of error.
if (group->next_host >= group->nhosts)
@@ -150,7 +152,7 @@ vdi_dns_pick_host(const struct sess *sp, struct vdi_dns_hostgroup *group) {
current = i + initial - nhosts;
else
current = i + initial;
- if (VDI_Healthy(group->hosts[current], sp->req)) {
+ if (VDI_Healthy(group->hosts[current], req)) {
group->next_host = current+1;
return (group->hosts[current]);
}
@@ -192,24 +194,22 @@ vdi_dns_groupmatch(const struct vdi_dns_hostgroup *group, const char *hostname)
* and freed.
*/
static int
-vdi_dns_cache_has(const struct sess *sp,
- struct vdi_dns *vs,
- const char *hostname,
- struct director **backend,
- int rwlock)
+vdi_dns_cache_has(const struct req *req, struct vdi_dns *vs,
+ const char *hostname, struct director **backend, int rwlock)
{
struct director *ret;
struct vdi_dns_hostgroup *hostgr;
struct vdi_dns_hostgroup *hostgr2;
+
VTAILQ_FOREACH_SAFE(hostgr, &vs->cachelist, list, hostgr2) {
CHECK_OBJ_NOTNULL(hostgr, VDI_DNSDIR_MAGIC);
- if (hostgr->ttl <= sp->req->t_req) {
+ if (hostgr->ttl <= req->t_req) {
if (rwlock)
vdi_dns_pop_cache(vs, hostgr);
return (0);
}
if (vdi_dns_groupmatch(hostgr, hostname)) {
- ret = (vdi_dns_pick_host(sp, hostgr));
+ ret = (vdi_dns_pick_host(req, hostgr));
*backend = ret;
if (*backend != NULL)
CHECK_OBJ_NOTNULL(*backend, DIRECTOR_MAGIC);
@@ -223,17 +223,17 @@ vdi_dns_cache_has(const struct sess *sp,
* (Sorry for the list_add/_add confusion...)
*/
static void
-vdi_dns_cache_list_add(const struct sess *sp,
- struct vdi_dns *vs,
- struct vdi_dns_hostgroup *new)
+vdi_dns_cache_list_add(const struct req *req, struct vdi_dns *vs,
+ struct vdi_dns_hostgroup *new)
{
+
if (vs->ncachelist >= VDI_DNS_MAX_CACHE) {
VSC_C_main->dir_dns_cache_full++;
vdi_dns_pop_cache(vs, NULL);
}
CHECK_OBJ_NOTNULL(new, VDI_DNSDIR_MAGIC);
assert(new->hostname != 0);
- new->ttl = sp->req->t_req + vs->ttl;
+ new->ttl = req->t_req + vs->ttl;
VTAILQ_INSERT_HEAD(&vs->cachelist, new, list);
vs->ncachelist++;
}
@@ -243,10 +243,8 @@ vdi_dns_cache_list_add(const struct sess *sp,
* cache_has() afterwards to do multiple dns lookups in parallel...
*/
static int
-vdi_dns_cache_add(const struct sess *sp,
- struct vdi_dns *vs,
- const char *hostname,
- struct director **backend)
+vdi_dns_cache_add(const struct req *req, struct vdi_dns *vs,
+ const char *hostname, struct director **backend)
{
int error, i, host = 0;
struct addrinfo *res0, *res, hint;
@@ -258,7 +256,7 @@ vdi_dns_cache_add(const struct sess *sp,
* unique names or something equally troublesome).
*/
- if (vdi_dns_cache_has(sp, vs, hostname, backend, 1))
+ if (vdi_dns_cache_has(req, vs, hostname, backend, 1))
return (1);
memset(&hint, 0, sizeof hint);
@@ -273,7 +271,7 @@ vdi_dns_cache_add(const struct sess *sp,
error = getaddrinfo(hostname, "80", &hint, &res0);
VSC_C_main->dir_dns_lookups++;
if (error) {
- vdi_dns_cache_list_add(sp, vs, new);
+ vdi_dns_cache_list_add(req, vs, new);
VSC_C_main->dir_dns_failed++;
return (0);
}
@@ -297,8 +295,8 @@ vdi_dns_cache_add(const struct sess *sp,
freeaddrinfo(res0);
new->nhosts = host;
- vdi_dns_cache_list_add(sp, vs, new);
- *backend = vdi_dns_pick_host(sp, new);
+ vdi_dns_cache_list_add(req, vs, new);
+ *backend = vdi_dns_pick_host(req, new);
return (1);
}
@@ -308,15 +306,14 @@ vdi_dns_cache_add(const struct sess *sp,
* Returns a backend or NULL.
*/
static struct director *
-vdi_dns_walk_cache(const struct sess *sp,
- struct vdi_dns *vs,
- const char *hostname)
+vdi_dns_walk_cache(const struct req *req, struct vdi_dns *vs,
+ const char *hostname)
{
struct director *backend = NULL;
int ret;
AZ(pthread_rwlock_rdlock(&vs->rwlock));
- ret = vdi_dns_cache_has(sp, vs, hostname, &backend, 0);
+ ret = vdi_dns_cache_has(req, vs, hostname, &backend, 0);
AZ(pthread_rwlock_unlock(&vs->rwlock));
if (!ret) {
/*
@@ -325,7 +322,7 @@ vdi_dns_walk_cache(const struct sess *sp,
* XXX: Should 'ret' be checked for that ?
*/
AZ(pthread_rwlock_wrlock(&vs->rwlock));
- ret = vdi_dns_cache_add(sp, vs, hostname, &backend);
+ ret = vdi_dns_cache_add(req, vs, hostname, &backend);
AZ(pthread_rwlock_unlock(&vs->rwlock));
} else
VSC_C_main->dir_dns_hit++;
@@ -339,7 +336,7 @@ vdi_dns_walk_cache(const struct sess *sp,
/* Parses the Host:-header and heads out to find a backend.
*/
static struct director *
-vdi_dns_find_backend(const struct sess *sp, struct vdi_dns *vs)
+vdi_dns_find_backend(const struct req *req, struct vdi_dns *vs)
{
struct director *ret;
struct http *hp;
@@ -349,10 +346,10 @@ vdi_dns_find_backend(const struct sess *sp, struct vdi_dns *vs)
/* bereq is only present after recv et al., otherwise use req (i.e.:
* use req for health checks in vcl_recv and such).
*/
- if (sp->req->busyobj != NULL && sp->req->busyobj->bereq)
- hp = sp->req->busyobj->bereq;
+ if (req->busyobj != NULL && req->busyobj->bereq)
+ hp = req->busyobj->bereq;
else
- hp = sp->req->http;
+ hp = req->http;
CHECK_OBJ_NOTNULL(hp, HTTP_MAGIC);
@@ -367,7 +364,7 @@ vdi_dns_find_backend(const struct sess *sp, struct vdi_dns *vs)
bprintf(hostname, "%.*s%s", (int)(q - p), p,
vs->suffix ? vs->suffix : "");
- ret = vdi_dns_walk_cache(sp, vs, hostname);
+ ret = vdi_dns_walk_cache(req, vs, hostname);
return (ret);
}
@@ -382,7 +379,7 @@ vdi_dns_getfd(const struct director *director, struct req *req)
CHECK_OBJ_NOTNULL(director, DIRECTOR_MAGIC);
CAST_OBJ_NOTNULL(vs, director->priv, VDI_DNS_MAGIC);
- dir = vdi_dns_find_backend(req->sp, vs);
+ dir = vdi_dns_find_backend(req, vs);
if (!dir || !VDI_Healthy(dir, req))
return (NULL);
@@ -408,7 +405,7 @@ vdi_dns_healthy(const struct director *dir, const struct req *req)
CHECK_OBJ_NOTNULL(req->director, DIRECTOR_MAGIC);
CAST_OBJ_NOTNULL(vs, req->director->priv, VDI_DNS_MAGIC);
- dir = vdi_dns_find_backend(req->sp, vs);
+ dir = vdi_dns_find_backend(req, vs);
if (dir)
return (1);
diff --git a/bin/varnishd/cache/cache_esi_deliver.c b/bin/varnishd/cache/cache_esi_deliver.c
index 9199d67..6aed2c3 100644
--- a/bin/varnishd/cache/cache_esi_deliver.c
+++ b/bin/varnishd/cache/cache_esi_deliver.c
@@ -417,14 +417,14 @@ ESI_Deliver(struct req *req)
*/
static uint8_t
-ved_deliver_byterange(const struct sess *sp, ssize_t low, ssize_t high)
+ved_deliver_byterange(const struct req *req, ssize_t low, ssize_t high)
{
struct storage *st;
ssize_t l, lx;
u_char *p;
lx = 0;
- VTAILQ_FOREACH(st, &sp->req->obj->store, list) {
+ VTAILQ_FOREACH(st, &req->obj->store, list) {
p = st->ptr;
l = st->len;
if (lx + l < low) {
@@ -443,7 +443,7 @@ ved_deliver_byterange(const struct sess *sp, ssize_t low, ssize_t high)
l = high - lx;
assert(lx >= low && lx + l <= high);
if (l != 0)
- (void)WRW_Write(sp->wrk, p, l);
+ (void)WRW_Write(req->sp->wrk, p, l);
if (p + l < st->ptr + st->len)
return(p[l]);
lx += l;
@@ -452,7 +452,7 @@ ved_deliver_byterange(const struct sess *sp, ssize_t low, ssize_t high)
}
void
-ESI_DeliverChild(const struct sess *sp)
+ESI_DeliverChild(struct req *req)
{
struct storage *st;
struct object *obj;
@@ -464,9 +464,10 @@ ESI_DeliverChild(const struct sess *sp)
int i, j;
uint8_t tailbuf[8];
- if (!sp->req->obj->gziped) {
- VTAILQ_FOREACH(st, &sp->req->obj->store, list)
- ved_pretend_gzip(sp->req, st->ptr, st->len);
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ if (!req->obj->gziped) {
+ VTAILQ_FOREACH(st, &req->obj->store, list)
+ ved_pretend_gzip(req, st->ptr, st->len);
return;
}
/*
@@ -475,9 +476,9 @@ ESI_DeliverChild(const struct sess *sp)
* padding it, as necessary, to a byte boundary.
*/
- dbits = (void*)WS_Alloc(sp->req->ws, 8);
+ dbits = (void*)WS_Alloc(req->ws, 8);
AN(dbits);
- obj = sp->req->obj;
+ obj = req->obj;
CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
start = obj->gzip_start;
last = obj->gzip_last;
@@ -495,10 +496,10 @@ ESI_DeliverChild(const struct sess *sp)
* XXX: optimize for the case where the 'last'
* XXX: bit is in an empty copy block
*/
- *dbits = ved_deliver_byterange(sp, start/8, last/8);
+ *dbits = ved_deliver_byterange(req, start/8, last/8);
*dbits &= ~(1U << (last & 7));
- (void)WRW_Write(sp->wrk, dbits, 1);
- cc = ved_deliver_byterange(sp, 1 + last/8, stop/8);
+ (void)WRW_Write(req->sp->wrk, dbits, 1);
+ cc = ved_deliver_byterange(req, 1 + last/8, stop/8);
switch((int)(stop & 7)) {
case 0: /* xxxxxxxx */
/* I think we have an off by one here, but that's OK */
@@ -541,10 +542,10 @@ ESI_DeliverChild(const struct sess *sp)
INCOMPL();
}
if (lpad > 0)
- (void)WRW_Write(sp->wrk, dbits + 1, lpad);
+ (void)WRW_Write(req->sp->wrk, dbits + 1, lpad);
/* We need the entire tail, but it may not be in one storage segment */
- st = VTAILQ_LAST(&sp->req->obj->store, storagehead);
+ st = VTAILQ_LAST(&req->obj->store, storagehead);
for (i = sizeof tailbuf; i > 0; i -= j) {
j = st->len;
if (j > i)
@@ -556,6 +557,6 @@ ESI_DeliverChild(const struct sess *sp)
icrc = vle32dec(tailbuf);
ilen = vle32dec(tailbuf + 4);
- sp->req->crc = crc32_combine(sp->req->crc, icrc, ilen);
- sp->req->l_crc += ilen;
+ req->crc = crc32_combine(req->crc, icrc, ilen);
+ req->l_crc += ilen;
}
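ESI_DeliverChild() ends by folding the child object's gzip CRC into the
parent response's running CRC. zlib's crc32_combine() makes that cheap:
given the CRCs of two byte streams and the length of the second, it
returns the CRC of their concatenation without touching the data again.
A small standalone demonstration (build with -lz):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int
    main(void)
    {
        const char *a = "hello ", *b = "world";
        uLong crc_a, crc_b, crc_whole, crc_comb;

        crc_a = crc32(0L, (const Bytef *)a, strlen(a));
        crc_b = crc32(0L, (const Bytef *)b, strlen(b));
        crc_whole = crc32(0L, (const Bytef *)"hello world", 11);

        /* CRC of the concatenation, computed from the parts alone. */
        crc_comb = crc32_combine(crc_a, crc_b, strlen(b));
        printf("%s\n", crc_comb == crc_whole ? "match" : "mismatch");
        return (0);
    }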
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index 129a0a7..0e2fa5f 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -48,7 +48,7 @@ static unsigned fetchfrag;
* We want to issue the first error we encounter on fetching and
* suppress the rest. This function does that.
*
- * Other code is allowed to look at sp->req->busyobj->fetch_failed to bail out
+ * Other code is allowed to look at busyobj->fetch_failed to bail out
*
* For convenience, always return -1
*/
diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c
index 80ce07b..e29593d 100644
--- a/bin/varnishd/cache/cache_gzip.c
+++ b/bin/varnishd/cache/cache_gzip.c
@@ -198,8 +198,7 @@ VGZ_ObufFull(const struct vgz *vg)
}
/*--------------------------------------------------------------------
- * Keep the outbuffer supplied with storage and file it under the
- * sp->req->obj as it fills.
+ * Keep the outbuffer supplied with storage
*/
int
diff --git a/bin/varnishd/cache/cache_hash.c b/bin/varnishd/cache/cache_hash.c
index f2d5be3..68e4cda 100644
--- a/bin/varnishd/cache/cache_hash.c
+++ b/bin/varnishd/cache/cache_hash.c
@@ -288,22 +288,19 @@ HSH_Insert(struct worker *wrk, const void *digest, struct objcore *oc)
*/
struct objcore *
-HSH_Lookup(struct sess *sp)
+HSH_Lookup(struct req *req)
{
struct worker *wrk;
struct objhead *oh;
struct objcore *oc;
struct objcore *grace_oc;
struct object *o;
- struct req *req;
double grace_ttl;
int busy_found;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
- wrk = sp->wrk;
- CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
- req = sp->req;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ wrk = req->sp->wrk;
+ CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(req->http, HTTP_MAGIC);
AN(req->director);
AN(hash);
@@ -421,19 +418,20 @@ HSH_Lookup(struct sess *sp)
oh->waitinglist = wrk->nwaitinglist;
wrk->nwaitinglist = NULL;
}
- VTAILQ_INSERT_TAIL(&oh->waitinglist->list, sp, list);
+ VTAILQ_INSERT_TAIL(&oh->waitinglist->list,
+ req->sp, list);
}
if (cache_param->diag_bitmap & 0x20)
VSLb(req->vsl, SLT_Debug,
"on waiting list <%p>", oh);
- SES_Charge(sp);
+ SES_Charge(req->sp);
/*
* The objhead reference transfers to the sess, we get it
* back when the sess comes off the waiting list and
* calls us again
*/
req->hash_objhead = oh;
- sp->wrk = NULL;
+ req->sp->wrk = NULL;
Lck_Unlock(&oh->mtx);
return (NULL);
}
@@ -450,7 +448,7 @@ HSH_Lookup(struct sess *sp)
AZ(req->busyobj);
req->busyobj = VBO_GetBusyObj(wrk);
- req->busyobj->vsl->wid = sp->vsl_id;
+ req->busyobj->vsl->wid = req->sp->vsl_id;
req->busyobj->refcount = 2; /* One for req, one for FetchBody */
VRY_Validate(req->vary_b);
diff --git a/bin/varnishd/cache/cache_pipe.c b/bin/varnishd/cache/cache_pipe.c
index 77ffcda..13a8c10 100644
--- a/bin/varnishd/cache/cache_pipe.c
+++ b/bin/varnishd/cache/cache_pipe.c
@@ -60,7 +60,7 @@ rdf(int fd0, int fd1)
}
void
-PipeSession(struct sess *sp)
+PipeRequest(struct req *req)
{
struct vbc *vc;
struct worker *wrk;
@@ -68,36 +68,36 @@ PipeSession(struct sess *sp)
struct busyobj *bo;
int i;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
- CHECK_OBJ_NOTNULL(sp->wrk, WORKER_MAGIC);
- bo = sp->req->busyobj;
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
+ wrk = req->sp->wrk;
+ CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+ bo = req->busyobj;
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
- wrk = sp->wrk;
- vc = VDI_GetFd(NULL, sp->req);
+ vc = VDI_GetFd(NULL, req);
if (vc == NULL)
return;
bo->vbc = vc; /* For panic dumping */
(void)VTCP_blocking(vc->fd);
- WRW_Reserve(wrk, &vc->fd, bo->vsl, sp->req->t_req);
- sp->wrk->acct_tmp.hdrbytes +=
- http_Write(wrk, bo->bereq, 0);
+ WRW_Reserve(wrk, &vc->fd, bo->vsl, req->t_req);
+ wrk->acct_tmp.hdrbytes += http_Write(wrk, bo->bereq, 0);
- if (sp->req->htc->pipeline.b != NULL)
- sp->wrk->acct_tmp.bodybytes +=
- WRW_Write(wrk, sp->req->htc->pipeline.b,
- Tlen(sp->req->htc->pipeline));
+ if (req->htc->pipeline.b != NULL)
+ wrk->acct_tmp.bodybytes +=
+ WRW_Write(wrk, req->htc->pipeline.b,
+ Tlen(req->htc->pipeline));
i = WRW_FlushRelease(wrk);
if (i) {
- SES_Close(sp, "pipe");
+ SES_Close(req->sp, "pipe");
VDI_CloseFd(&vc);
return;
}
- sp->req->t_resp = VTIM_real();
+ req->t_resp = VTIM_real();
memset(fds, 0, sizeof fds);
@@ -105,8 +105,8 @@ PipeSession(struct sess *sp)
fds[0].fd = vc->fd;
fds[0].events = POLLIN | POLLERR;
- // XXX: not yet (void)VTCP_linger(sp->fd, 0);
- fds[1].fd = sp->fd;
+ // XXX: not yet (void)VTCP_linger(req->sp->fd, 0);
+ fds[1].fd = req->sp->fd;
fds[1].events = POLLIN | POLLERR;
while (fds[0].fd > -1 || fds[1].fd > -1) {
@@ -115,24 +115,24 @@ PipeSession(struct sess *sp)
i = poll(fds, 2, cache_param->pipe_timeout * 1000);
if (i < 1)
break;
- if (fds[0].revents && rdf(vc->fd, sp->fd)) {
+ if (fds[0].revents && rdf(vc->fd, req->sp->fd)) {
if (fds[1].fd == -1)
break;
(void)shutdown(vc->fd, SHUT_RD);
- (void)shutdown(sp->fd, SHUT_WR);
+ (void)shutdown(req->sp->fd, SHUT_WR);
fds[0].events = 0;
fds[0].fd = -1;
}
- if (fds[1].revents && rdf(sp->fd, vc->fd)) {
+ if (fds[1].revents && rdf(req->sp->fd, vc->fd)) {
if (fds[0].fd == -1)
break;
- (void)shutdown(sp->fd, SHUT_RD);
+ (void)shutdown(req->sp->fd, SHUT_RD);
(void)shutdown(vc->fd, SHUT_WR);
fds[1].events = 0;
fds[1].fd = -1;
}
}
- SES_Close(sp, "pipe");
+ SES_Close(req->sp, "pipe");
VDI_CloseFd(&vc);
bo->vbc = NULL;
}
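PipeRequest() is, at its core, a two-socket relay: poll() the client and
backend fds, copy whatever arrives to the other side, and half-close each
direction independently as it drains. A stripped-down sketch of that loop
without any of the Varnish plumbing (the function names here are made up
for the example):

    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Copy one chunk from rfd to wfd; nonzero means that direction is done. */
    static int
    relay(int rfd, int wfd)
    {
        char buf[BUFSIZ];
        ssize_t n;

        n = read(rfd, buf, sizeof buf);
        if (n <= 0)
            return (1);
        return (write(wfd, buf, n) != n);
    }

    void
    pipe_loop(int client_fd, int backend_fd, int timeout_ms)
    {
        struct pollfd fds[2];

        fds[0].fd = backend_fd;
        fds[0].events = POLLIN | POLLERR;
        fds[1].fd = client_fd;
        fds[1].events = POLLIN | POLLERR;

        /* poll() ignores entries whose fd is negative. */
        while (fds[0].fd > -1 || fds[1].fd > -1) {
            fds[0].revents = fds[1].revents = 0;
            if (poll(fds, 2, timeout_ms) < 1)
                break;          /* timeout or error: give up */
            if (fds[0].revents && relay(backend_fd, client_fd)) {
                (void)shutdown(backend_fd, SHUT_RD);
                (void)shutdown(client_fd, SHUT_WR);
                fds[0].fd = -1;
            }
            if (fds[1].revents && relay(client_fd, backend_fd)) {
                (void)shutdown(client_fd, SHUT_RD);
                (void)shutdown(backend_fd, SHUT_WR);
                fds[1].fd = -1;
            }
        }
    }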
diff --git a/bin/varnishd/cache/cache_response.c b/bin/varnishd/cache/cache_response.c
index 59aa6cb..85c8233 100644
--- a/bin/varnishd/cache/cache_response.c
+++ b/bin/varnishd/cache/cache_response.c
@@ -154,30 +154,30 @@ RES_BuildHttp(struct req *req)
*/
static void
-res_WriteGunzipObj(const struct sess *sp)
+res_WriteGunzipObj(struct req *req)
{
struct storage *st;
unsigned u = 0;
struct vgz *vg;
int i;
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
- vg = VGZ_NewUngzip(sp->req->vsl, "U D -");
+ vg = VGZ_NewUngzip(req->vsl, "U D -");
AZ(VGZ_WrwInit(vg));
- VTAILQ_FOREACH(st, &sp->req->obj->store, list) {
- CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ VTAILQ_FOREACH(st, &req->obj->store, list) {
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
u += st->len;
- i = VGZ_WrwGunzip(sp->wrk, vg, st->ptr, st->len);
+ i = VGZ_WrwGunzip(req->sp->wrk, vg, st->ptr, st->len);
/* XXX: error check */
(void)i;
}
- VGZ_WrwFlush(sp->wrk, vg);
+ VGZ_WrwFlush(req->sp->wrk, vg);
(void)VGZ_Destroy(&vg);
- assert(u == sp->req->obj->len);
+ assert(u == req->obj->len);
}
/*--------------------------------------------------------------------*/
@@ -270,12 +270,12 @@ RES_WriteObj(struct req *req)
} else if (req->res_mode & RES_ESI) {
ESI_Deliver(req);
} else if (req->res_mode & RES_ESI_CHILD && req->gzip_resp) {
- ESI_DeliverChild(req->sp);
+ ESI_DeliverChild(req);
} else if (req->res_mode & RES_ESI_CHILD &&
!req->gzip_resp && req->obj->gziped) {
- res_WriteGunzipObj(req->sp);
+ res_WriteGunzipObj(req);
} else if (req->res_mode & RES_GUNZIP) {
- res_WriteGunzipObj(req->sp);
+ res_WriteGunzipObj(req);
} else {
res_WriteDirObj(req, low, high);
}
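res_WriteGunzipObj() streams the stored gzip segments through a
decompressor and writes the plain bytes out as they are produced, so the
object never has to sit fully decompressed in memory. The real code goes
through the VGZ_* wrappers; a hedged sketch of the same streaming pattern
with raw zlib (build with -lz) looks like this:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* Inflate one gzip stream from 'in' to 'out', chunk by chunk. */
    static int
    stream_gunzip(FILE *in, FILE *out)
    {
        z_stream zs;
        unsigned char ibuf[4096], obuf[4096];
        int ret = Z_OK;

        memset(&zs, 0, sizeof zs);
        /* 15 + 16: max window size, expect a gzip (not zlib) header. */
        if (inflateInit2(&zs, 15 + 16) != Z_OK)
            return (-1);
        do {
            zs.avail_in = fread(ibuf, 1, sizeof ibuf, in);
            if (zs.avail_in == 0)
                break;          /* truncated input */
            zs.next_in = ibuf;
            do {
                zs.avail_out = sizeof obuf;
                zs.next_out = obuf;
                ret = inflate(&zs, Z_NO_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END) {
                    (void)inflateEnd(&zs);
                    return (-1);
                }
                fwrite(obuf, 1, sizeof obuf - zs.avail_out, out);
            } while (ret == Z_OK && zs.avail_out == 0);
        } while (ret != Z_STREAM_END);
        (void)inflateEnd(&zs);
        return (ret == Z_STREAM_END ? 0 : -1);
    }

    int
    main(void)
    {
        return (stream_gunzip(stdin, stdout) ? 1 : 0);
    }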
diff --git a/bin/varnishd/hash/hash_slinger.h b/bin/varnishd/hash/hash_slinger.h
index fe0fb9f..5e05748 100644
--- a/bin/varnishd/hash/hash_slinger.h
+++ b/bin/varnishd/hash/hash_slinger.h
@@ -53,7 +53,7 @@ struct hash_slinger {
/* cache_hash.c */
void HSH_Cleanup(struct worker *w);
-struct objcore *HSH_Lookup(struct sess *sp);
+struct objcore *HSH_Lookup(struct req *);
void HSH_Ref(struct objcore *o);
void HSH_Drop(struct worker *, struct object **);
void HSH_Init(const struct hash_slinger *slinger);