[master] 88d1d1d Add std.late_100_continue to postpone sending a 100 Continue response
Nils Goroll
nils.goroll at uplex.de
Thu Feb 23 17:46:05 CET 2017
commit 88d1d1dce4426584003c6b690436c96c3c3f28fa
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Thu Feb 23 17:44:43 2017 +0100
Add std.late_100_continue to postpone sending a 100 Continue response
When postponing,
- re-create the Expect header for pipe mode
- For synth content, close the connection by default, but allow
VCL to veto this behavior
Prevent drain-reading the body if the connection is closed anyway
Respect Connection: close from vcl_synth
diff --git a/bin/varnishd/cache/cache_req_body.c b/bin/varnishd/cache/cache_req_body.c
index 2036094..f92dbba 100644
--- a/bin/varnishd/cache/cache_req_body.c
+++ b/bin/varnishd/cache/cache_req_body.c
@@ -238,6 +238,8 @@ VRB_Ignore(struct req *req)
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+ if (req->doclose)
+ return (0);
if (req->req_body_status == REQ_BODY_WITH_LEN ||
req->req_body_status == REQ_BODY_WITHOUT_LEN)
(void)VRB_Iterate(req, httpq_req_body_discard, NULL);
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index 821a1e7..459ef6f 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -193,6 +193,15 @@ cnt_synth(struct worker *wrk, struct req *req)
http_PrintfHeader(req->resp, "X-Varnish: %u", VXID(req->vsl->wid));
http_PutResponse(h, "HTTP/1.1", req->err_code, req->err_reason);
+ /*
+ * For late 100-continue, we suggest to VCL that the connection be
+ * closed, so that we neither send a 100-continue nor drain-read the
+ * request body. But VCL has the option to veto this by removing
+ * Connection: close.
+ */
+ if (req->want100cont) {
+ http_SetHeader(h, "Connection: close");
+ }
+
synth_body = VSB_new_auto();
AN(synth_body);
@@ -224,6 +233,9 @@ cnt_synth(struct worker *wrk, struct req *req)
http_PrintfHeader(req->resp, "Content-Length: %zd",
VSB_len(synth_body));
+ if (!req->doclose && http_HdrIs(req->resp, H_Connection, "close"))
+ req->doclose = SC_RESP_CLOSE;
+
/* Discard any lingering request body before delivery */
(void)VRB_Ignore(req);
@@ -613,6 +625,11 @@ cnt_pipe(struct worker *wrk, struct req *req)
http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(req->vsl->wid));
http_SetHeader(bo->bereq, "Connection: close");
+ if (req->want100cont) {
+ http_SetHeader(bo->bereq, "Expect: 100-continue");
+ req->want100cont = 0;
+ }
+
VCL_pipe_method(req->vcl, wrk, req, bo, NULL);
switch (wrk->handling) {
@@ -747,7 +764,7 @@ cnt_recv(struct worker *wrk, struct req *req)
VCL_recv_method(req->vcl, wrk, req, NULL, NULL);
}
- if (req->want100cont) {
+ if (req->want100cont && !req->late100cont) {
req->want100cont = 0;
if (req->transport->minimal_response(req, 100))
return (-1);
diff --git a/bin/varnishd/http2/cache_http2_deliver.c b/bin/varnishd/http2/cache_http2_deliver.c
index a589428..ef45943 100644
--- a/bin/varnishd/http2/cache_http2_deliver.c
+++ b/bin/varnishd/http2/cache_http2_deliver.c
@@ -141,6 +141,7 @@ h2_minimal_response(struct req *req, uint16_t status)
if (status >= 400)
req->err_code = status;
+ /* XXX return code checking once H2_Send returns anything but 0 */
H2_Send(req->wrk, r2, 1,
H2_FRAME_HEADERS, H2FF_HEADERS_END_HEADERS,
l, buf);
diff --git a/bin/varnishtest/tests/c00018.vtc b/bin/varnishtest/tests/c00018.vtc
index d5fc0e6..f61b6c5 100644
--- a/bin/varnishtest/tests/c00018.vtc
+++ b/bin/varnishtest/tests/c00018.vtc
@@ -5,11 +5,146 @@ server s1 {
txresp
rxreq
txresp
+ rxreq
+ txresp
+ rxreq
+ txresp
+ rxreq
+ txresp
+} -start
+
+varnish v1 -vcl+backend {
+ import std;
+
+ sub vcl_recv {
+ if (req.url == "/") {
+ return (pass);
+ }
+
+ std.late_100_continue(true);
+
+ if (req.url ~ "^/err") {
+ return (synth(405));
+ }
+
+ if (req.url ~ "^/synthnocl") {
+ return (synth(200));
+ }
+
+ if (req.url == "/latecache") {
+ std.cache_req_body(1KB);
+ }
+ return (pass);
+
+ }
+ sub vcl_synth {
+ if (req.url == "/synthnocl") {
+ unset resp.http.Connection;
+ }
+ }
+
+} -start
+
+logexpect l1 -v v1 -g raw {
+ # base case: bad Expect
+ expect * 1001 RespStatus 417
+
+ # base case: Immediate 100-continue
+ expect * 1003 ReqUnset {^Expect: 100-continue$}
+ expect 0 1003 ReqStart {^.}
+ expect 0 1003 ReqMethod {^POST$}
+ expect 0 1003 ReqURL {^/$}
+ expect 0 1003 ReqProtocol {^HTTP/1.1$}
+ expect 0 1003 ReqHeader {^Content-Length: 20$}
+ expect 0 1003 ReqHeader {^X-Forwarded-For:}
+ expect 0 1003 VCL_call {^RECV$}
+ expect 0 1003 VCL_return {^pass$}
+ expect 0 1003 RespProtocol {^HTTP/1.1$}
+ expect 0 1003 RespStatus {^100$}
+ expect 0 1003 RespReason {^Continue$}
+ expect 0 1003 VCL_call {^HASH$}
+
+ # no 100 if client has already sent body (and it fits)
+ expect * 1005 ReqUnset {^Expect: 100-continue$}
+ expect 0 1005 ReqStart {^.}
+ expect 0 1005 ReqMethod {^POST$}
+ expect 0 1005 ReqURL {^/$}
+ expect 0 1005 ReqProtocol {^HTTP/1.1$}
+ expect 0 1005 ReqHeader {^Content-Length: 3$}
+ expect 0 1005 ReqHeader {^X-Forwarded-For:}
+ expect 0 1005 VCL_call {^RECV$}
+ expect 0 1005 VCL_return {^pass$}
+ expect 0 1005 VCL_call {^HASH$}
+
+ # late no cache
+ expect * 1007 ReqUnset {^Expect: 100-continue$}
+ expect 0 1007 ReqStart {^.}
+ expect 0 1007 ReqMethod {^POST$}
+ expect 0 1007 ReqURL {^/late$}
+ expect 0 1007 ReqProtocol {^HTTP/1.1$}
+ expect 0 1007 ReqHeader {^Content-Length: 20$}
+ expect 0 1007 ReqHeader {^X-Forwarded-For:}
+ expect 0 1007 VCL_call {^RECV$}
+ expect 0 1007 VCL_return {^pass$}
+ expect 0 1007 VCL_call {^HASH$}
+ expect 0 1007 VCL_return {^lookup$}
+ expect 0 1007 VCL_call {^PASS$}
+ expect 0 1007 VCL_return {^fetch$}
+ expect 0 1007 Link {^bereq 1008 pass$}
+ expect 0 1007 Storage {^malloc Transient$}
+ expect 0 1007 RespProtocol {^HTTP/1.1$}
+ expect 0 1007 RespStatus {^100$}
+ expect 0 1007 RespReason {^Continue$}
+ expect 0 1007 Timestamp {^ReqBody:}
+ expect 0 1007 Timestamp {^Fetch:}
+
+ # late cache
+ expect * 1009 ReqUnset {^Expect: 100-continue$}
+ expect 0 1009 ReqStart {^.}
+ expect 0 1009 ReqMethod {^POST$}
+ expect 0 1009 ReqURL {^/latecache$}
+ expect 0 1009 ReqProtocol {^HTTP/1.1$}
+ expect 0 1009 ReqHeader {^Content-Length: 20$}
+ expect 0 1009 ReqHeader {^X-Forwarded-For:}
+ expect 0 1009 VCL_call {^RECV$}
+ expect 0 1009 Storage {^malloc Transient$}
+ expect 0 1009 RespProtocol {^HTTP/1.1$}
+ expect 0 1009 RespStatus {^100$}
+ expect 0 1009 RespReason {^Continue$}
+ expect 0 1009 Timestamp {^ReqBody:}
+ expect 0 1009 VCL_return {^pass$}
+ expect 0 1009 VCL_call {^HASH$}
+
+ # err
+ expect * 1011 ReqUnset {^Expect: 100-continue$}
+ expect 0 1011 ReqStart {^.}
+ expect 0 1011 ReqMethod {^POST$}
+ expect 0 1011 ReqURL {^/err$}
+ expect 0 1011 ReqProtocol {^HTTP/1.1$}
+ expect 0 1011 ReqHeader {^Content-Length: 20$}
+ expect 0 1011 ReqHeader {^X-Forwarded-For:}
+ expect 0 1011 VCL_call {^RECV$}
+ expect 0 1011 VCL_return {^synth$}
+ expect 0 1011 VCL_call {^HASH$}
+ expect 0 1011 VCL_return {^lookup$}
+ expect 0 1011 Timestamp {^Process:}
+ expect 0 1011 RespHeader {^Date:}
+ expect 0 1011 RespHeader {^Server: Varnish$}
+ expect 0 1011 RespHeader {^X-Varnish: 1011$}
+ expect 0 1011 RespProtocol {^HTTP/1.1$}
+ expect 0 1011 RespStatus {^405$}
+
} -start
-varnish v1 -vcl+backend { } -start
+client c1 {
+ # base case: bad Expect
+ txreq -url "/" -req POST -hdr "Expect: 101-continue" -body "foo"
+ rxresp
+ expect resp.status == 417
+} -run
client c1 {
+ # base case: Immediate 100-continue
txreq -url "/" -req POST -hdr "Expect: 100-continue " \
-hdr "Content-Length: 20"
rxresp
@@ -18,11 +153,67 @@ client c1 {
rxresp
expect resp.status == 200
+ # no 100 if client has already sent body (and it fits)
txreq -url "/" -req POST -hdr "Expect: 100-continue " -body "foo"
rxresp
expect resp.status == 200
- txreq -url "/" -req POST -hdr "Expect: 101-continue" -body "foo"
+ # late no cache
+ txreq -url "/late" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20"
rxresp
- expect resp.status == 417
+ expect resp.status == 100
+ send "01234567890123456789"
+ rxresp
+ expect resp.status == 200
+
+ # late cache
+ txreq -url "/latecache" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20"
+ rxresp
+ expect resp.status == 100
+ send "01234567890123456789"
+ rxresp
+ expect resp.status == 200
+
+ # err
+ txreq -url "/err" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20"
+ rxresp
+ expect resp.status == 405
+ expect_close
} -run
+
+client c1 {
+ # Immediate 100-continue with Client Connection: close
+ txreq -url "/" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20" \
+ -hdr "Connection: close"
+ rxresp
+ expect resp.status == 100
+ send "01234567890123456789"
+ rxresp
+ expect resp.status == 200
+ expect_close
+} -run
+
+client c1 {
+ # vcl vetoing the Connection: close in synth
+ txreq -url "/synthnocl" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20"
+ rxresp
+ expect resp.status == 100
+ send "01234567890123456789"
+ rxresp
+ expect resp.status == 200
+
+ # vcl vetoing the Connection: close in synth but client close
+ txreq -url "/synthnocl" -req POST -hdr "Expect: 100-continue " \
+ -hdr "Content-Length: 20" \
+ -hdr "Connection: close"
+ rxresp
+ expect resp.status == 200
+ expect_close
+} -run
+
+logexpect l1 -wait
diff --git a/include/tbl/req_flags.h b/include/tbl/req_flags.h
index fba8943..e23b9c1 100644
--- a/include/tbl/req_flags.h
+++ b/include/tbl/req_flags.h
@@ -36,6 +36,7 @@ REQ_FLAG(hash_always_miss, 1, 1, "")
REQ_FLAG(is_hit, 0, 0, "")
REQ_FLAG(waitinglist, 0, 0, "")
REQ_FLAG(want100cont, 0, 0, "")
+REQ_FLAG(late100cont, 0, 0, "")
#undef REQ_FLAG
/*lint -restore */
diff --git a/lib/libvmod_std/vmod.vcc b/lib/libvmod_std/vmod.vcc
index 7caa2c0..9917e0f 100644
--- a/lib/libvmod_std/vmod.vcc
+++ b/lib/libvmod_std/vmod.vcc
@@ -283,6 +283,31 @@ Description
Example
| set req.http.My-Env = getenv("MY_ENV");
+$Function VOID late_100_continue(BOOL late)
+
+Description
+ Controls when varnish reacts to an `Expect: 100-continue` client
+ request header.
+
+ Varnish always generates a `100 Continue` response if
+ requested by the client through the `Expect:
+ 100-continue` header when waiting for request body data.
+
+ But, by default, the `100 Continue` response is already
+ generated immediately after `vcl_recv` returns to reduce
+ latencies under the assumption that the request body will be
+ read eventually.
+
+ Calling `std.late_100_continue(true)` in `vcl_recv` will cause
+ the `100 Continue` response to only be sent when needed. This
+ may cause additional latencies for processing request bodies,
+ but is the correct behavior by strict interpretation of
+ RFC7231.
+
+ This function has no effect outside `vcl_recv` or after
+ calling `std.cache_req_body()` or any other function consuming
+ the request body.
+
SEE ALSO
========
diff --git a/lib/libvmod_std/vmod_std.c b/lib/libvmod_std/vmod_std.c
index 94baed2..81e49f7 100644
--- a/lib/libvmod_std/vmod_std.c
+++ b/lib/libvmod_std/vmod_std.c
@@ -41,6 +41,7 @@
#include "vtcp.h"
#include "vsa.h"
#include "vtim.h"
+#include "vcl.h"
#include "cache/cache_director.h"
@@ -255,3 +256,24 @@ vmod_getenv(VRT_CTX, VCL_STRING name)
return (NULL);
return (getenv(name));
}
+
+VCL_VOID __match_proto__(td_std_late_100_continue)
+vmod_late_100_continue(VRT_CTX, VCL_BOOL late)
+{
+ struct req *req;
+
+ CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
+ if (ctx->method != VCL_MET_RECV) {
+ VSLb(ctx->vsl, SLT_VCL_Error,
+ "std.late_100_continue() only valid in vcl_recv{}");
+ return;
+ }
+
+ req = ctx->req;
+ CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+
+ if (! req->want100cont)
+ return;
+
+ req->late100cont = !!late;
+}
More information about the varnish-commit
mailing list