[master] bc31f5c Rename WRW to V1L (Version 1 Lineprotocol)

Poul-Henning Kamp phk at FreeBSD.org
Wed Oct 22 22:56:46 CEST 2014


commit bc31f5c13883ce4e88a2d50603b3d7454d539a61
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Wed Oct 22 20:56:22 2014 +0000

    Rename WRW to V1L (Version 1 Lineprotocol)
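
The change is purely mechanical: the code that writes request/response bytes to a file descriptor moves from the generic cache layer into the HTTP/1-specific directory, and every WRW identifier becomes V1L. The complete mapping, as it appears in the hunks below:

    cache/cache_wrw.c    ->  http1/cache_http1_line.c
    struct wrw           ->  struct v1l
    WRW_MAGIC            ->  V1L_MAGIC
    WRW_IsReleased()     ->  V1L_IsReleased()
    WRW_Chunked()        ->  V1L_Chunked()
    WRW_EndChunk()       ->  V1L_EndChunk()
    WRW_Reserve()        ->  V1L_Reserve()
    WRW_Flush()          ->  V1L_Flush()
    WRW_FlushRelease()   ->  V1L_FlushRelease()
    WRW_Write()          ->  V1L_Write()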

diff --git a/bin/varnishd/Makefile.am b/bin/varnishd/Makefile.am
index 922459e..a347119 100644
--- a/bin/varnishd/Makefile.am
+++ b/bin/varnishd/Makefile.am
@@ -46,7 +46,6 @@ varnishd_SOURCES = \
 	cache/cache_vrt_var.c \
 	cache/cache_vrt_vmod.c \
 	cache/cache_wrk.c \
-	cache/cache_wrw.c \
 	cache/cache_ws.c \
 	common/common_vsm.c \
 	common/common_vsc.c \
@@ -57,6 +56,7 @@ varnishd_SOURCES = \
 	http1/cache_http1_deliver.c \
 	http1/cache_http1_fetch.c \
 	http1/cache_http1_fsm.c \
+	http1/cache_http1_line.c \
 	http1/cache_http1_pipe.c \
 	http1/cache_http1_proto.c \
 	http1/cache_http1_vfp.c \
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 3ba86b7..6b63de3 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -123,7 +123,7 @@ struct vrt_privs;
 struct vsb;
 struct waitinglist;
 struct worker;
-struct wrw;
+struct v1l;
 
 #define DIGEST_LEN		32
 
@@ -344,7 +344,7 @@ struct worker {
 
 	double			lastused;
 
-	struct wrw		*wrw;
+	struct v1l		*v1l;
 
 	pthread_cond_t		cond;
 
@@ -948,13 +948,13 @@ int Pool_Task(struct pool *pp, struct pool_task *task, enum pool_how how);
 void Pool_Sumstat(struct worker *w);
 void Pool_PurgeStat(unsigned nobj);
 
-#define WRW_IsReleased(w)	((w)->wrw == NULL)
-void WRW_Chunked(const struct worker *w);
-void WRW_EndChunk(const struct worker *w);
-void WRW_Reserve(struct worker *w, int *fd, struct vsl_log *, double t0);
-unsigned WRW_Flush(const struct worker *w);
-unsigned WRW_FlushRelease(struct worker *w, uint64_t *pacc);
-unsigned WRW_Write(const struct worker *w, const void *ptr, int len);
+#define V1L_IsReleased(w)	((w)->v1l == NULL)
+void V1L_Chunked(const struct worker *w);
+void V1L_EndChunk(const struct worker *w);
+void V1L_Reserve(struct worker *w, int *fd, struct vsl_log *, double t0);
+unsigned V1L_Flush(const struct worker *w);
+unsigned V1L_FlushRelease(struct worker *w, uint64_t *pacc);
+unsigned V1L_Write(const struct worker *w, const void *ptr, int len);
 
 /* cache_session.c [SES] */
 void SES_Close(struct sess *sp, enum sess_close reason);
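
Taken together, the prototypes above define the life cycle of a V1L session: bind it to a worker with V1L_Reserve(), queue bytes with V1L_Write(), optionally wrap them in chunked framing, and tear it down with V1L_FlushRelease(), after which V1L_IsReleased() must hold again. A minimal sketch of a caller, condensed from the V1D_Deliver() and V1F_fetch_hdr() hunks further down (error handling elided; fd, vsl, t0, body and sp are assumed to be set up by the caller as in those functions):

    uint64_t acct = 0;

    V1L_Reserve(wrk, &fd, vsl, t0);         /* bind a v1l to wrk->aws */
    (void)V1L_Write(wrk, "HTTP/1.1 200 OK\r\n", -1); /* len -1: strlen() */
    V1L_Chunked(wrk);                       /* reserve the chunk-header iovec */
    (void)V1L_Write(wrk, body, body_len);   /* queued only, not yet sent */
    V1L_EndChunk(wrk);                      /* flush + "0\r\n\r\n" last-chunk */
    if (V1L_FlushRelease(wrk, &acct))       /* writev() the rest, unbind */
        SES_Close(sp, SC_REM_CLOSE);        /* non-zero werr: write failed */
    assert(V1L_IsReleased(wrk));            /* wrk->v1l == NULL again */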
diff --git a/bin/varnishd/cache/cache_esi_deliver.c b/bin/varnishd/cache/cache_esi_deliver.c
index 2663c7d..886bec2 100644
--- a/bin/varnishd/cache/cache_esi_deliver.c
+++ b/bin/varnishd/cache/cache_esi_deliver.c
@@ -56,7 +56,7 @@ ved_include(struct req *preq, const char *src, const char *host)
 	if (preq->esi_level >= cache_param->max_esi_depth)
 		return;
 
-	(void)WRW_FlushRelease(preq->wrk, NULL);
+	(void)V1L_FlushRelease(preq->wrk, NULL);
 
 	/* Take a workspace snapshot */
 	wrk_ws_wm = WS_Snapshot(wrk->aws); /* XXX ? */
@@ -130,7 +130,7 @@ ved_include(struct req *preq, const char *src, const char *host)
 		AZ(req->wrk);
 		(void)usleep(10000);
 	}
-	AN(WRW_IsReleased(wrk));
+	AN(V1L_IsReleased(wrk));
 
 	/* Charge the transmitted body byte counts also to the parent request */
 	preq->acct.resp_bodybytes += req->acct.resp_bodybytes;
@@ -141,9 +141,9 @@ ved_include(struct req *preq, const char *src, const char *host)
 	/* Reset the workspace */
 	WS_Reset(wrk->aws, wrk_ws_wm);	/* XXX ? */
 
-	WRW_Reserve(preq->wrk, &preq->sp->fd, preq->vsl, preq->t_prev);
+	V1L_Reserve(preq->wrk, &preq->sp->fd, preq->vsl, preq->t_prev);
 	if (preq->res_mode & RES_CHUNKED)
-		WRW_Chunked(preq->wrk);
+		V1L_Chunked(preq->wrk);
 
 	preq->vcl = req->vcl;
 	req->vcl = NULL;
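
These hunks are the one place a v1l is dropped and re-acquired mid-delivery. The parent's v1l (and its iovec array) lives in the worker's aws workspace, which the include snapshots and resets around the child request, so the parent must flush and release before the child runs and reserve afresh, re-opening its chunk context, afterwards; roughly:

    (void)V1L_FlushRelease(preq->wrk, NULL);    /* free wrk->aws for the child */
    /* ... included request delivers its body on the same fd ... */
    V1L_Reserve(preq->wrk, &preq->sp->fd, preq->vsl, preq->t_prev);
    if (preq->res_mode & RES_CHUNKED)
        V1L_Chunked(preq->wrk);                 /* parent resumes its own chunking */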
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index 0810eea..1bf0934 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -588,7 +588,7 @@ vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
 		return (F_STP_ERROR);
 	}
 
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 
 
 	if (bo->do_gzip || (bo->is_gzip && !bo->do_gunzip))
@@ -875,7 +875,7 @@ vbf_fetch_thread(struct worker *wrk, void *priv)
 			WRONG("Illegal fetch_step");
 		}
 	}
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 
 	assert(bo->director_state == DIR_S_NULL);
 
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index 48e894d..3b5755b 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -63,7 +63,7 @@ cnt_deliver(struct worker *wrk, struct req *req)
 	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
 	CHECK_OBJ_NOTNULL(req->objcore->objhead, OBJHEAD_MAGIC);
 	CHECK_OBJ_NOTNULL(req->vcl, VCL_CONF_MAGIC);
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 
 	assert(req->objcore->refcnt > 0);
 
@@ -159,7 +159,7 @@ cnt_deliver(struct worker *wrk, struct req *req)
 		ObjSlim(wrk, req->objcore);
 	}
 
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 	(void)HSH_DerefObjCore(wrk, &req->objcore);
 	http_Teardown(req->resp);
 	return (REQ_FSM_DONE);
@@ -517,7 +517,7 @@ cnt_pipe(struct worker *wrk, struct req *req)
 	assert(wrk->handling == VCL_RET_PIPE);
 
 	V1P_Process(req, bo);
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 	http_Teardown(bo->bereq);
 	THR_SetBusyobj(NULL);
 	VBO_DerefBusyObj(wrk, &bo);
@@ -794,7 +794,7 @@ CNT_Request(struct worker *wrk, struct req *req)
 		VRB_Free(req);
 		req->wrk = NULL;
 	}
-	assert(WRW_IsReleased(wrk));
+	assert(V1L_IsReleased(wrk));
 	return (nxt);
 }
 
diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c
index 5d5b289..024d74f 100644
--- a/bin/varnishd/cache/cache_session.c
+++ b/bin/varnishd/cache/cache_session.c
@@ -114,7 +114,7 @@ ses_req_pool_task(struct worker *wrk, void *arg)
 	wrk->lastused = NAN;
 	HTTP1_Session(wrk, req);
 	WS_Assert(wrk->aws);
-	AZ(wrk->wrw);
+	AZ(wrk->v1l);
 	if (DO_DEBUG(DBG_VCLREL) && wrk->vcl != NULL)
 		VCL_Rel(&wrk->vcl);
 	THR_SetRequest(NULL);
diff --git a/bin/varnishd/cache/cache_shmlog.c b/bin/varnishd/cache/cache_shmlog.c
index 04c24a8..8554658 100644
--- a/bin/varnishd/cache/cache_shmlog.c
+++ b/bin/varnishd/cache/cache_shmlog.c
@@ -371,7 +371,7 @@ VSLb_ts(struct vsl_log *vsl, const char *event, double first, double *pprev,
 
 	/* XXX: Make an option to turn off some unnecessary timestamp
 	   logging. This must be done carefully because some functions
-	   (e.g. WRW_Reserve) takes the last timestamp as it's inital
+	   (e.g. V1L_Reserve) takes the last timestamp as it's inital
 	   value for timeout calculation. */
 	vsl_sanity(vsl);
 	assert(!isnan(now) && now != 0.);
diff --git a/bin/varnishd/cache/cache_wrw.c b/bin/varnishd/cache/cache_wrw.c
deleted file mode 100644
index ef7b322..0000000
--- a/bin/varnishd/cache/cache_wrw.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*-
- * Copyright (c) 2006 Verdens Gang AS
- * Copyright (c) 2006-2011 Varnish Software AS
- * All rights reserved.
- *
- * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Write data to fd
- * We try to use writev() if possible in order to minimize number of
- * syscalls made and packets sent.  It also just might allow the worker
- * thread to complete the request without holding stuff locked.
- */
-
-#include "config.h"
-
-#include <sys/types.h>
-#include <sys/uio.h>
-
-#include <limits.h>
-#include <stdio.h>
-
-#include "cache.h"
-#include "vtim.h"
-
-/*--------------------------------------------------------------------*/
-
-struct wrw {
-	unsigned		magic;
-#define WRW_MAGIC		0x2f2142e5
-	int			*wfd;
-	unsigned		werr;	/* valid after WRW_Flush() */
-	struct iovec		*iov;
-	unsigned		siov;
-	unsigned		niov;
-	ssize_t			liov;
-	ssize_t			cliov;
-	unsigned		ciov;	/* Chunked header marker */
-	double			t0;
-	struct vsl_log		*vsl;
-	ssize_t			cnt;	/* Flushed byte count */
-};
-
-/*--------------------------------------------------------------------
- */
-
-void
-WRW_Reserve(struct worker *wrk, int *fd, struct vsl_log *vsl, double t0)
-{
-	struct wrw *wrw;
-	unsigned u;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	AZ(wrk->wrw);
-	wrw = WS_Alloc(wrk->aws, sizeof *wrw);
-	AN(wrw);
-	INIT_OBJ(wrw, WRW_MAGIC);
-	u = WS_Reserve(wrk->aws, 0);
-	u = PRNDDN(u);
-	u /= sizeof(struct iovec);
-	if (u > IOV_MAX)
-		u = IOV_MAX;
-	AN(u);
-	wrw->iov = (void*)PRNDUP(wrk->aws->f);
-	wrw->siov = u;
-	wrw->ciov = u;
-	wrw->werr = 0;
-	wrw->liov = 0;
-	wrw->niov = 0;
-	wrw->wfd = fd;
-	wrw->t0 = t0;
-	wrw->vsl = vsl;
-	wrk->wrw = wrw;
-}
-
-static void
-wrw_release(struct worker *wrk, uint64_t *pacc)
-{
-	struct wrw *wrw;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	wrw = wrk->wrw;
-	wrk->wrw = NULL;
-	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
-	if (pacc != NULL)
-		*pacc += wrw->cnt;
-	WS_Release(wrk->aws, 0);
-	WS_Reset(wrk->aws, NULL);
-}
-
-static void
-wrw_prune(struct wrw *wrw, ssize_t bytes)
-{
-	ssize_t used = 0;
-	ssize_t j, used_here;
-
-	for (j = 0; j < wrw->niov; j++) {
-		if (used + wrw->iov[j].iov_len > bytes) {
-			/* Cutoff is in this iov */
-			used_here = bytes - used;
-			wrw->iov[j].iov_len -= used_here;
-			wrw->iov[j].iov_base =
-			    (char*)wrw->iov[j].iov_base + used_here;
-			memmove(wrw->iov, &wrw->iov[j],
-			    (wrw->niov - j) * sizeof(struct iovec));
-			wrw->niov -= j;
-			wrw->liov -= bytes;
-			return;
-		}
-		used += wrw->iov[j].iov_len;
-	}
-	AZ(wrw->liov);
-}
-
-unsigned
-WRW_Flush(const struct worker *wrk)
-{
-	ssize_t i;
-	struct wrw *wrw;
-	char cbuf[32];
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	wrw = wrk->wrw;
-	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
-	AN(wrw->wfd);
-
-	/* For chunked, there must be one slot reserved for the chunked tail */
-	if (wrw->ciov < wrw->siov)
-		assert(wrw->niov < wrw->siov);
-
-	if (*wrw->wfd >= 0 && wrw->liov > 0 && wrw->werr == 0) {
-		if (wrw->ciov < wrw->siov && wrw->cliov > 0) {
-			/* Add chunk head & tail */
-			bprintf(cbuf, "00%zx\r\n", wrw->cliov);
-			i = strlen(cbuf);
-			wrw->iov[wrw->ciov].iov_base = cbuf;
-			wrw->iov[wrw->ciov].iov_len = i;
-			wrw->liov += i;
-
-			wrw->iov[wrw->niov].iov_base = cbuf + i - 2;
-			wrw->iov[wrw->niov++].iov_len = 2;
-			wrw->liov += 2;
-		} else if (wrw->ciov < wrw->siov) {
-			wrw->iov[wrw->ciov].iov_base = cbuf;
-			wrw->iov[wrw->ciov].iov_len = 0;
-		}
-
-		i = writev(*wrw->wfd, wrw->iov, wrw->niov);
-		if (i > 0)
-			wrw->cnt += i;
-		while (i != wrw->liov && i > 0) {
-			/* Remove sent data from start of I/O vector,
-			 * then retry; we hit a timeout, but some data
-			 * was sent.
-			 *
-			 * XXX: Add a "minimum sent data per timeout
-			 * counter to prevent slowlaris attacks
-			*/
-
-			if (VTIM_real() - wrw->t0 > cache_param->send_timeout) {
-				VSLb(wrw->vsl, SLT_Debug,
-				    "Hit total send timeout, "
-				    "wrote = %zd/%zd; not retrying",
-				    i, wrw->liov);
-				i = -1;
-				break;
-			}
-
-			VSLb(wrw->vsl, SLT_Debug,
-			    "Hit idle send timeout, wrote = %zd/%zd; retrying",
-			    i, wrw->liov);
-
-			wrw_prune(wrw, i);
-			i = writev(*wrw->wfd, wrw->iov, wrw->niov);
-			if (i > 0)
-				wrw->cnt += i;
-		}
-		if (i <= 0) {
-			wrw->werr++;
-			VSLb(wrw->vsl, SLT_Debug,
-			    "Write error, retval = %zd, len = %zd, errno = %s",
-			    i, wrw->liov, strerror(errno));
-		}
-	}
-	wrw->liov = 0;
-	wrw->cliov = 0;
-	wrw->niov = 0;
-	if (wrw->ciov < wrw->siov)
-		wrw->ciov = wrw->niov++;
-	return (wrw->werr);
-}
-
-unsigned
-WRW_FlushRelease(struct worker *wrk, uint64_t *pacc)
-{
-	unsigned u;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	AN(wrk->wrw->wfd);
-	u = WRW_Flush(wrk);
-	wrw_release(wrk, pacc);
-	return (u);
-}
-
-unsigned
-WRW_Write(const struct worker *wrk, const void *ptr, int len)
-{
-	struct wrw *wrw;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	wrw = wrk->wrw;
-	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
-	AN(wrw->wfd);
-	if (len == 0 || *wrw->wfd < 0)
-		return (0);
-	if (len == -1)
-		len = strlen(ptr);
-	if (wrw->niov >= wrw->siov - (wrw->ciov < wrw->siov ? 1 : 0))
-		(void)WRW_Flush(wrk);
-	wrw->iov[wrw->niov].iov_base = TRUST_ME(ptr);
-	wrw->iov[wrw->niov].iov_len = len;
-	wrw->liov += len;
-	wrw->niov++;
-	if (wrw->ciov < wrw->siov) {
-		assert(wrw->niov < wrw->siov);
-		wrw->cliov += len;
-	}
-	return (len);
-}
-
-void
-WRW_Chunked(const struct worker *wrk)
-{
-	struct wrw *wrw;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	wrw = wrk->wrw;
-	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
-
-	assert(wrw->ciov == wrw->siov);
-	/*
-	 * If there are not space for chunked header, a chunk of data and
-	 * a chunk tail, we might as well flush right away.
-	 */
-	if (wrw->niov + 3 >= wrw->siov)
-		(void)WRW_Flush(wrk);
-	wrw->ciov = wrw->niov++;
-	wrw->cliov = 0;
-	assert(wrw->ciov < wrw->siov);
-	assert(wrw->niov < wrw->siov);
-}
-
-/*
- * XXX: It is not worth the complexity to attempt to get the
- * XXX: end of chunk into the WRW_Flush(), because most of the time
- * XXX: if not always, that is a no-op anyway, because the calling
- * XXX: code already called WRW_Flush() to release local storage.
- */
-
-void
-WRW_EndChunk(const struct worker *wrk)
-{
-	struct wrw *wrw;
-
-	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	wrw = wrk->wrw;
-	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
-
-	assert(wrw->ciov < wrw->siov);
-	(void)WRW_Flush(wrk);
-	wrw->ciov = wrw->siov;
-	wrw->niov = 0;
-	wrw->cliov = 0;
-	(void)WRW_Write(wrk, "0\r\n\r\n", -1);
-}
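
Apart from the WRW->V1L identifier rename, the deleted file reappears below byte-for-byte as http1/cache_http1_line.c; the only other change is the include path, which now has to reach back into the cache directory:

    -#include "cache.h"
    +#include "cache/cache.h"
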
diff --git a/bin/varnishd/http1/cache_http1_deliver.c b/bin/varnishd/http1/cache_http1_deliver.c
index cde97b5..29c8171 100644
--- a/bin/varnishd/http1/cache_http1_deliver.c
+++ b/bin/varnishd/http1/cache_http1_deliver.c
@@ -50,9 +50,9 @@ v1d_bytes(struct req *req, enum vdp_action act, void **priv,
 	AZ(req->vdp_nxt);		/* always at the bottom of the pile */
 
 	if (len > 0)
-		wl = WRW_Write(req->wrk, ptr, len);
+		wl = V1L_Write(req->wrk, ptr, len);
 	req->acct.resp_bodybytes += len;
-	if (act > VDP_NULL && WRW_Flush(req->wrk))
+	if (act > VDP_NULL && V1L_Flush(req->wrk))
 		return (-1);
 	if (len != wl)
 		return (-1);
@@ -327,7 +327,7 @@ V1D_Deliver(struct req *req, struct busyobj *bo)
 			v1d_dorange(req, bo, r);
 	}
 
-	WRW_Reserve(req->wrk, &req->sp->fd, req->vsl, req->t_prev);
+	V1L_Reserve(req->wrk, &req->sp->fd, req->vsl, req->t_prev);
 
 	/*
 	 * Send HTTP protocol header, unless interior ESI object
@@ -337,7 +337,7 @@ V1D_Deliver(struct req *req, struct busyobj *bo)
 		    HTTP1_Write(req->wrk, req->resp, HTTP1_Resp);
 
 	if (req->res_mode & RES_CHUNKED)
-		WRW_Chunked(req->wrk);
+		V1L_Chunked(req->wrk);
 
 	ois = OIS_DONE;
 	if (!req->wantbody) {
@@ -379,9 +379,9 @@ V1D_Deliver(struct req *req, struct busyobj *bo)
 	if (ois == OIS_DONE &&
 	    (req->res_mode & RES_CHUNKED) &&
 	    !(req->res_mode & RES_ESI_CHILD))
-		WRW_EndChunk(req->wrk);
+		V1L_EndChunk(req->wrk);
 
-	if ((WRW_FlushRelease(req->wrk, NULL) || ois != OIS_DONE) &&
+	if ((V1L_FlushRelease(req->wrk, NULL) || ois != OIS_DONE) &&
 	    req->sp->fd >= 0)
 		SES_Close(req->sp, SC_REM_CLOSE);
 }
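
At the bottom of the delivery pile, v1d_bytes() maps the VDP contract directly onto V1L: bytes are only queued, a real flush happens when the action requires one, and either a flush error or a short queue aborts delivery. A sketch of the contract, assuming nothing beyond the hunk above:

    wl = V1L_Write(req->wrk, ptr, len); /* queue; flushes itself only if the
                                         * iovec array fills up */
    if (act > VDP_NULL && V1L_Flush(req->wrk))
        return (-1);                    /* accumulated werr: the write failed */
    if (len != wl)
        return (-1);                    /* short queue: V1L_Write() returns 0
                                         * once the fd is closed */
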
diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c
index 3289acd..77b3675 100644
--- a/bin/varnishd/http1/cache_http1_fetch.c
+++ b/bin/varnishd/http1/cache_http1_fetch.c
@@ -56,8 +56,8 @@ vbf_iter_req_body(struct req *req, void *priv, void *ptr, size_t l)
 	CAST_OBJ_NOTNULL(wrk, priv, WORKER_MAGIC);
 
 	if (l > 0) {
-		(void)WRW_Write(wrk, ptr, l);
-		if (WRW_Flush(wrk))
+		(void)V1L_Write(wrk, ptr, l);
+		if (V1L_Flush(wrk))
 			return (-1);
 	}
 	return (0);
@@ -112,7 +112,7 @@ V1F_fetch_hdr(struct worker *wrk, struct busyobj *bo)
 	}
 
 	(void)VTCP_blocking(vc->fd);	/* XXX: we should timeout instead */
-	WRW_Reserve(wrk, &vc->fd, bo->vsl, bo->t_prev);
+	V1L_Reserve(wrk, &vc->fd, bo->vsl, bo->t_prev);
 	hdrbytes = HTTP1_Write(wrk, hp, HTTP1_Req);
 
 	/* Deal with any message-body the request might (still) have */
@@ -120,7 +120,7 @@ V1F_fetch_hdr(struct worker *wrk, struct busyobj *bo)
 
 	if (bo->req != NULL) {
 		if (do_chunked)
-			WRW_Chunked(wrk);
+			V1L_Chunked(wrk);
 		i = VRB_Iterate(bo->req, vbf_iter_req_body, wrk);
 
 		if (bo->req->req_body_status == REQ_BODY_TAKEN) {
@@ -133,10 +133,10 @@ V1F_fetch_hdr(struct worker *wrk, struct busyobj *bo)
 			retry = -1;
 		}
 		if (do_chunked)
-			WRW_EndChunk(wrk);
+			V1L_EndChunk(wrk);
 	}
 
-	j = WRW_FlushRelease(wrk, &bo->acct.bereq_hdrbytes);
+	j = V1L_FlushRelease(wrk, &bo->acct.bereq_hdrbytes);
 	if (bo->acct.bereq_hdrbytes > hdrbytes) {
 		bo->acct.bereq_bodybytes = bo->acct.bereq_hdrbytes - hdrbytes;
 		bo->acct.bereq_hdrbytes = hdrbytes;
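
The accounting after the release works because V1L_FlushRelease() adds the v1l's total flushed byte count (its cnt field) to *pacc, here bo->acct.bereq_hdrbytes. That total covers the headers plus any request body pushed by vbf_iter_req_body(), so the body share is recovered by subtracting the header size HTTP1_Write() reported; with hypothetical numbers:

    /* hdrbytes = 312 from HTTP1_Write(), total flushed = 1336 */
    bo->acct.bereq_bodybytes = 1336 - 312;  /* = 1024 body bytes */
    bo->acct.bereq_hdrbytes  = 312;         /* headers only, as the name says */
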
diff --git a/bin/varnishd/http1/cache_http1_line.c b/bin/varnishd/http1/cache_http1_line.c
new file mode 100644
index 0000000..95c668f
--- /dev/null
+++ b/bin/varnishd/http1/cache_http1_line.c
@@ -0,0 +1,295 @@
+/*-
+ * Copyright (c) 2006 Verdens Gang AS
+ * Copyright (c) 2006-2011 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Write data to fd
+ * We try to use writev() if possible in order to minimize number of
+ * syscalls made and packets sent.  It also just might allow the worker
+ * thread to complete the request without holding stuff locked.
+ */
+
+#include "config.h"
+
+#include <sys/types.h>
+#include <sys/uio.h>
+
+#include <limits.h>
+#include <stdio.h>
+
+#include "cache/cache.h"
+#include "vtim.h"
+
+/*--------------------------------------------------------------------*/
+
+struct v1l {
+	unsigned		magic;
+#define V1L_MAGIC		0x2f2142e5
+	int			*wfd;
+	unsigned		werr;	/* valid after V1L_Flush() */
+	struct iovec		*iov;
+	unsigned		siov;
+	unsigned		niov;
+	ssize_t			liov;
+	ssize_t			cliov;
+	unsigned		ciov;	/* Chunked header marker */
+	double			t0;
+	struct vsl_log		*vsl;
+	ssize_t			cnt;	/* Flushed byte count */
+};
+
+/*--------------------------------------------------------------------
+ */
+
+void
+V1L_Reserve(struct worker *wrk, int *fd, struct vsl_log *vsl, double t0)
+{
+	struct v1l *v1l;
+	unsigned u;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	AZ(wrk->v1l);
+	v1l = WS_Alloc(wrk->aws, sizeof *v1l);
+	AN(v1l);
+	INIT_OBJ(v1l, V1L_MAGIC);
+	u = WS_Reserve(wrk->aws, 0);
+	u = PRNDDN(u);
+	u /= sizeof(struct iovec);
+	if (u > IOV_MAX)
+		u = IOV_MAX;
+	AN(u);
+	v1l->iov = (void*)PRNDUP(wrk->aws->f);
+	v1l->siov = u;
+	v1l->ciov = u;
+	v1l->werr = 0;
+	v1l->liov = 0;
+	v1l->niov = 0;
+	v1l->wfd = fd;
+	v1l->t0 = t0;
+	v1l->vsl = vsl;
+	wrk->v1l = v1l;
+}
+
+static void
+v1l_release(struct worker *wrk, uint64_t *pacc)
+{
+	struct v1l *v1l;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	v1l = wrk->v1l;
+	wrk->v1l = NULL;
+	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
+	if (pacc != NULL)
+		*pacc += v1l->cnt;
+	WS_Release(wrk->aws, 0);
+	WS_Reset(wrk->aws, NULL);
+}
+
+static void
+v1l_prune(struct v1l *v1l, ssize_t bytes)
+{
+	ssize_t used = 0;
+	ssize_t j, used_here;
+
+	for (j = 0; j < v1l->niov; j++) {
+		if (used + v1l->iov[j].iov_len > bytes) {
+			/* Cutoff is in this iov */
+			used_here = bytes - used;
+			v1l->iov[j].iov_len -= used_here;
+			v1l->iov[j].iov_base =
+			    (char*)v1l->iov[j].iov_base + used_here;
+			memmove(v1l->iov, &v1l->iov[j],
+			    (v1l->niov - j) * sizeof(struct iovec));
+			v1l->niov -= j;
+			v1l->liov -= bytes;
+			return;
+		}
+		used += v1l->iov[j].iov_len;
+	}
+	AZ(v1l->liov);
+}
+
+unsigned
+V1L_Flush(const struct worker *wrk)
+{
+	ssize_t i;
+	struct v1l *v1l;
+	char cbuf[32];
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	v1l = wrk->v1l;
+	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
+	AN(v1l->wfd);
+
+	/* For chunked, there must be one slot reserved for the chunked tail */
+	if (v1l->ciov < v1l->siov)
+		assert(v1l->niov < v1l->siov);
+
+	if (*v1l->wfd >= 0 && v1l->liov > 0 && v1l->werr == 0) {
+		if (v1l->ciov < v1l->siov && v1l->cliov > 0) {
+			/* Add chunk head & tail */
+			bprintf(cbuf, "00%zx\r\n", v1l->cliov);
+			i = strlen(cbuf);
+			v1l->iov[v1l->ciov].iov_base = cbuf;
+			v1l->iov[v1l->ciov].iov_len = i;
+			v1l->liov += i;
+
+			v1l->iov[v1l->niov].iov_base = cbuf + i - 2;
+			v1l->iov[v1l->niov++].iov_len = 2;
+			v1l->liov += 2;
+		} else if (v1l->ciov < v1l->siov) {
+			v1l->iov[v1l->ciov].iov_base = cbuf;
+			v1l->iov[v1l->ciov].iov_len = 0;
+		}
+
+		i = writev(*v1l->wfd, v1l->iov, v1l->niov);
+		if (i > 0)
+			v1l->cnt += i;
+		while (i != v1l->liov && i > 0) {
+			/* Remove sent data from start of I/O vector,
+			 * then retry; we hit a timeout, but some data
+			 * was sent.
+			 *
+			 * XXX: Add a "minimum sent data per timeout
+			 * counter to prevent slowlaris attacks
+			*/
+
+			if (VTIM_real() - v1l->t0 > cache_param->send_timeout) {
+				VSLb(v1l->vsl, SLT_Debug,
+				    "Hit total send timeout, "
+				    "wrote = %zd/%zd; not retrying",
+				    i, v1l->liov);
+				i = -1;
+				break;
+			}
+
+			VSLb(v1l->vsl, SLT_Debug,
+			    "Hit idle send timeout, wrote = %zd/%zd; retrying",
+			    i, v1l->liov);
+
+			v1l_prune(v1l, i);
+			i = writev(*v1l->wfd, v1l->iov, v1l->niov);
+			if (i > 0)
+				v1l->cnt += i;
+		}
+		if (i <= 0) {
+			v1l->werr++;
+			VSLb(v1l->vsl, SLT_Debug,
+			    "Write error, retval = %zd, len = %zd, errno = %s",
+			    i, v1l->liov, strerror(errno));
+		}
+	}
+	v1l->liov = 0;
+	v1l->cliov = 0;
+	v1l->niov = 0;
+	if (v1l->ciov < v1l->siov)
+		v1l->ciov = v1l->niov++;
+	return (v1l->werr);
+}
+
+unsigned
+V1L_FlushRelease(struct worker *wrk, uint64_t *pacc)
+{
+	unsigned u;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	AN(wrk->v1l->wfd);
+	u = V1L_Flush(wrk);
+	v1l_release(wrk, pacc);
+	return (u);
+}
+
+unsigned
+V1L_Write(const struct worker *wrk, const void *ptr, int len)
+{
+	struct v1l *v1l;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	v1l = wrk->v1l;
+	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
+	AN(v1l->wfd);
+	if (len == 0 || *v1l->wfd < 0)
+		return (0);
+	if (len == -1)
+		len = strlen(ptr);
+	if (v1l->niov >= v1l->siov - (v1l->ciov < v1l->siov ? 1 : 0))
+		(void)V1L_Flush(wrk);
+	v1l->iov[v1l->niov].iov_base = TRUST_ME(ptr);
+	v1l->iov[v1l->niov].iov_len = len;
+	v1l->liov += len;
+	v1l->niov++;
+	if (v1l->ciov < v1l->siov) {
+		assert(v1l->niov < v1l->siov);
+		v1l->cliov += len;
+	}
+	return (len);
+}
+
+void
+V1L_Chunked(const struct worker *wrk)
+{
+	struct v1l *v1l;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	v1l = wrk->v1l;
+	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
+
+	assert(v1l->ciov == v1l->siov);
+	/*
+	 * If there are not space for chunked header, a chunk of data and
+	 * a chunk tail, we might as well flush right away.
+	 */
+	if (v1l->niov + 3 >= v1l->siov)
+		(void)V1L_Flush(wrk);
+	v1l->ciov = v1l->niov++;
+	v1l->cliov = 0;
+	assert(v1l->ciov < v1l->siov);
+	assert(v1l->niov < v1l->siov);
+}
+
+/*
+ * XXX: It is not worth the complexity to attempt to get the
+ * XXX: end of chunk into the V1L_Flush(), because most of the time
+ * XXX: if not always, that is a no-op anyway, because the calling
+ * XXX: code already called V1L_Flush() to release local storage.
+ */
+
+void
+V1L_EndChunk(const struct worker *wrk)
+{
+	struct v1l *v1l;
+
+	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
+	v1l = wrk->v1l;
+	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
+
+	assert(v1l->ciov < v1l->siov);
+	(void)V1L_Flush(wrk);
+	v1l->ciov = v1l->siov;
+	v1l->niov = 0;
+	v1l->cliov = 0;
+	(void)V1L_Write(wrk, "0\r\n\r\n", -1);
+}
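
On the wire, the chunked-mode helpers produce standard HTTP/1.1 chunked framing: V1L_Flush() prints the pending payload size (cliov) into the reserved ciov slot as "00%zx\r\n" and reuses the last two bytes of that same buffer as the CRLF chunk tail, while V1L_EndChunk() appends the last-chunk marker. Flushing, say, 11 queued payload bytes and then ending the chunk sends:

    00b\r\n         chunk header (leading zeroes are legal in chunk-size hex)
    Hello World     the 11 queued payload bytes
    \r\n            chunk tail, cbuf + i - 2
    0\r\n\r\n       last-chunk, written by V1L_EndChunk()
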
diff --git a/bin/varnishd/http1/cache_http1_pipe.c b/bin/varnishd/http1/cache_http1_pipe.c
index f3fdb6a..c83b6c8 100644
--- a/bin/varnishd/http1/cache_http1_pipe.c
+++ b/bin/varnishd/http1/cache_http1_pipe.c
@@ -124,14 +124,14 @@ V1P_Process(struct req *req, struct busyobj *bo)
 	bo->director_state = DIR_S_BODY;
 	(void)VTCP_blocking(fd);
 
-	WRW_Reserve(wrk, &fd, bo->vsl, req->t_req);
+	V1L_Reserve(wrk, &fd, bo->vsl, req->t_req);
 	hdrbytes = HTTP1_Write(wrk, bo->bereq, HTTP1_Req);
 
 	if (req->htc->pipeline_b != NULL)
-		(void)WRW_Write(wrk, req->htc->pipeline_b,
+		(void)V1L_Write(wrk, req->htc->pipeline_b,
 		    req->htc->pipeline_e - req->htc->pipeline_b);
 
-	i = WRW_FlushRelease(wrk, &acct_pipe.bereq);
+	i = V1L_FlushRelease(wrk, &acct_pipe.bereq);
 	if (acct_pipe.bereq > hdrbytes) {
 		acct_pipe.in = acct_pipe.bereq - hdrbytes;
 		acct_pipe.bereq = hdrbytes;
diff --git a/bin/varnishd/http1/cache_http1_proto.c b/bin/varnishd/http1/cache_http1_proto.c
index 70a007e..45f4784 100644
--- a/bin/varnishd/http1/cache_http1_proto.c
+++ b/bin/varnishd/http1/cache_http1_proto.c
@@ -529,9 +529,9 @@ http1_WrTxt(const struct worker *wrk, const txt *hh, const char *suf)
 	AN(hh);
 	AN(hh->b);
 	AN(hh->e);
-	u = WRW_Write(wrk, hh->b, hh->e - hh->b);
+	u = V1L_Write(wrk, hh->b, hh->e - hh->b);
 	if (suf != NULL)
-		u += WRW_Write(wrk, suf, -1);
+		u += V1L_Write(wrk, suf, -1);
 	return (u);
 }
 
@@ -550,6 +550,6 @@ HTTP1_Write(const struct worker *w, const struct http *hp, const int *hf)
 
 	for (u = HTTP_HDR_FIRST; u < hp->nhd; u++)
 		l += http1_WrTxt(w, &hp->hd[u], "\r\n");
-	l += WRW_Write(w, "\r\n", -1);
+	l += V1L_Write(w, "\r\n", -1);
 	return (l);
 }
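
HTTP1_Write() never flattens the header block into a buffer: each header txt and its "\r\n" suffix become separate iovecs via V1L_Write(), with the bare CRLF that terminates the block queued last, and nothing reaches the socket until the next flush. For a hypothetical header the queued calls look like:

    (void)V1L_Write(w, "Content-Length: 42", -1);  /* hd[u]: one iovec */
    (void)V1L_Write(w, "\r\n", -1);                /* suffix: one iovec */
    /* ... one such pair per header ... */
    (void)V1L_Write(w, "\r\n", -1);                /* end of header block */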


