varnish-cache/bin/varnishd/http2/cache_http2_send.c

/*-
 * Copyright (c) 2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <sys/uio.h>

#include "cache/cache_varnishd.h"

#include "cache/cache_transport.h"
#include "http2/cache_http2.h"

#include "vend.h"
#include "vtim.h"

#define H2_SEND_HELD(h2, r2) (VTAILQ_FIRST(&(h2)->txqueue) == (r2))

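/*
 * Pick the error, if any, that currently applies to this stream: a
 * stream error takes precedence, otherwise the session error applies
 * to streams above the session's goaway_last_stream.
 */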
static h2_error
h2_errcheck(const struct h2_req *r2, const struct h2_sess *h2)
{
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);

        if (r2->error != NULL)
                return (r2->error);
        if (h2->error != NULL && r2->stream > h2->goaway_last_stream)
                return (h2->error);
        return (NULL);
}

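/*
 * Wait on cond with the session mtx held, bounded by h2_window_timeout
 * (when set). A timeout is translated into a stream error through
 * h2_stream_tmo(); returns -1 if an error now applies to the stream,
 * 0 otherwise.
 */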
static int
h2_cond_wait(pthread_cond_t *cond, struct h2_sess *h2, struct h2_req *r2)
{
        vtim_dur tmo = 0.;
        vtim_real now;
        h2_error h2e;
        int r;

        AN(cond);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);

        Lck_AssertHeld(&h2->sess->mtx);

        if (cache_param->h2_window_timeout > 0.)
                tmo = cache_param->h2_window_timeout;

        r = Lck_CondWaitTimeout(cond, &h2->sess->mtx, tmo);
        assert(r == 0 || r == ETIMEDOUT);

        now = VTIM_real();

        /* NB: when we grab h2_window_timeout before acquiring the session
         * lock we may time out, but once we wake up both send_timeout and
         * h2_window_timeout may have changed meanwhile. For this reason
         * h2_stream_tmo() may not log what timed out and we need to call
         * again with a magic NAN "now" that indicates to h2_stream_tmo()
         * that the stream reached the h2_window_timeout via the lock and
         * force it to log it.
         */
        h2e = h2_stream_tmo(h2, r2, now);
        if (h2e == NULL && r == ETIMEDOUT) {
                h2e = h2_stream_tmo(h2, r2, NAN);
                AN(h2e);
        }

        if (r2->error == NULL)
                r2->error = h2e;

        return (h2e != NULL ? -1 : 0);
}

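/*
 * Append r2 to the session's tx queue and wait until it reaches the
 * head of the queue, i.e. until this stream holds the send lease
 * (see H2_SEND_HELD).
 */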
static void
h2_send_get_locked(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2)
{

        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        Lck_AssertHeld(&h2->sess->mtx);
        if (&wrk->cond == h2->cond)
                ASSERT_RXTHR(h2);
        r2->wrk = wrk;
        VTAILQ_INSERT_TAIL(&h2->txqueue, r2, tx_list);
        while (!H2_SEND_HELD(h2, r2))
                AZ(Lck_CondWait(&wrk->cond, &h2->sess->mtx));
        r2->wrk = NULL;
}

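/* Locking wrapper: acquire the send lease for this stream. */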
void
H2_Send_Get(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2)
{

        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        Lck_Lock(&h2->sess->mtx);
        h2_send_get_locked(wrk, h2, r2);
        Lck_Unlock(&h2->sess->mtx);
}

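/*
 * Give up the send lease: remove r2 from the head of the tx queue and
 * signal the worker of the next queued stream, if any.
 */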
static void
h2_send_rel_locked(struct h2_sess *h2, const struct h2_req *r2)
{
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);

        Lck_AssertHeld(&h2->sess->mtx);
        AN(H2_SEND_HELD(h2, r2));
        VTAILQ_REMOVE(&h2->txqueue, r2, tx_list);
        r2 = VTAILQ_FIRST(&h2->txqueue);
        if (r2 != NULL) {
                CHECK_OBJ_NOTNULL(r2->wrk, WORKER_MAGIC);
                PTOK(pthread_cond_signal(&r2->wrk->cond));
        }
}

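/* Locking wrapper: release the send lease for this stream. */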
void
H2_Send_Rel(struct h2_sess *h2, const struct h2_req *r2)
{
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);

        Lck_Lock(&h2->sess->mtx);
        h2_send_rel_locked(h2, r2);
        Lck_Unlock(&h2->sess->mtx);
}

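/*
 * Assemble the 9 octet HTTP/2 frame header: 24 bit payload length,
 * 8 bit frame type, 8 bit flags and the 32 bit stream id field
 * (RFC 9113 section 4.1).
 */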
static void
h2_mk_hdr(uint8_t *hdr, h2_frame ftyp, uint8_t flags,
    uint32_t len, uint32_t stream)
{

        AN(hdr);
        assert(len < (1U << 24));
        vbe32enc(hdr, len << 8);
        hdr[3] = ftyp->type;
        hdr[4] = flags;
        vbe32enc(hdr + 5, stream);
}

/*
 * This is the "raw" frame sender, all per-stream accounting and
 * prioritization must have happened before this is called, and
 * the session mtx must be held.
 */

void
H2_Send_Frame(struct worker *wrk, struct h2_sess *h2,
    h2_frame ftyp, uint8_t flags,
    uint32_t len, uint32_t stream, const void *ptr)
{
        uint8_t hdr[9];
        ssize_t s;
        struct iovec iov[2];

        (void)wrk;

        AN(ftyp);
        AZ(flags & ~(ftyp->flags));
        if (stream == 0)
                AZ(ftyp->act_szero);
        else
                AZ(ftyp->act_snonzero);

        h2_mk_hdr(hdr, ftyp, flags, len, stream);
        Lck_Lock(&h2->sess->mtx);
        VSLb_bin(h2->vsl, SLT_H2TxHdr, 9, hdr);
        h2->srq->acct.resp_hdrbytes += 9;
        if (ftyp->overhead)
                h2->srq->acct.resp_bodybytes += len;
        Lck_Unlock(&h2->sess->mtx);

        memset(iov, 0, sizeof iov);
        iov[0].iov_base = (void*)hdr;
        iov[0].iov_len = sizeof hdr;
        iov[1].iov_base = TRUST_ME(ptr);
        iov[1].iov_len = len;
        s = writev(h2->sess->fd, iov, len == 0 ? 1 : 2);
        if (s != sizeof hdr + len) {
                if (errno == EWOULDBLOCK) {
                        H2S_Lock_VSLb(h2, SLT_SessError,
                            "H2: stream %u: Hit idle_send_timeout", stream);
                }
                /*
                 * There is no point in being nice here, we will be unable
                 * to send a GOAWAY once the code unrolls, so go directly
                 * to the finale and be done with it.
                 */
                h2->error = H2CE_PROTOCOL_ERROR;
        } else if (len > 0) {
                Lck_Lock(&h2->sess->mtx);
                VSLb_bin(h2->vsl, SLT_H2TxBody, len, ptr);
                Lck_Unlock(&h2->sess->mtx);
        }
}

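/*
 * The usable send window is the smaller of the stream window and the
 * session window (kept on the req0 pseudo stream).
 */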
static int64_t
h2_win_limit(const struct h2_req *r2, const struct h2_sess *h2)
{

        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(h2->req0, H2_REQ_MAGIC);

        Lck_AssertHeld(&h2->sess->mtx);
        return (vmin_t(int64_t, r2->t_window, h2->req0->t_window));
}

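/* Deduct w bytes from both the stream window and the session window. */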
static void
h2_win_charge(struct h2_req *r2, const struct h2_sess *h2, uint32_t w)
{
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(h2->req0, H2_REQ_MAGIC);

        Lck_AssertHeld(&h2->sess->mtx);
        r2->t_window -= w;
        h2->req0->t_window -= w;
}

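/*
 * Reserve up to "wanted" bytes of send window for this stream. If
 * either the stream or the session window is exhausted, the send lease
 * is given up while we wait for a window update (or an error/timeout)
 * and reacquired afterwards. Returns the number of bytes charged, or
 * 0 if nothing could be reserved.
 */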
static int64_t
h2_do_window(struct worker *wrk, struct h2_req *r2,
    struct h2_sess *h2, int64_t wanted)
{
        int64_t w = 0;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);

        if (wanted == 0)
                return (0);

        Lck_Lock(&h2->sess->mtx);
        if (r2->t_window <= 0 || h2->req0->t_window <= 0) {
                r2->t_winupd = VTIM_real();
                h2_send_rel_locked(h2, r2);

                assert(h2->winup_streams >= 0);
                h2->winup_streams++;

                while (r2->t_window <= 0 && h2_errcheck(r2, h2) == NULL) {
                        r2->cond = &wrk->cond;
                        (void)h2_cond_wait(r2->cond, h2, r2);
                        r2->cond = NULL;
                }

                while (h2->req0->t_window <= 0 && h2_errcheck(r2, h2) == NULL)
                        (void)h2_cond_wait(h2->winupd_cond, h2, r2);

                if (h2_errcheck(r2, h2) == NULL) {
                        w = vmin_t(int64_t, h2_win_limit(r2, h2), wanted);
                        h2_win_charge(r2, h2, w);
                        assert (w > 0);
                }

                if (r2->error == H2SE_BROKE_WINDOW &&
                    h2->open_streams <= h2->winup_streams) {
                        VSLb(h2->vsl, SLT_SessError, "H2: window bankrupt");
                        h2->error = r2->error = H2CE_BANKRUPT;
                }

                assert(h2->winup_streams > 0);
                h2->winup_streams--;

                h2_send_get_locked(wrk, h2, r2);
        }

        if (w == 0 && h2_errcheck(r2, h2) == NULL) {
                assert(r2->t_window > 0);
                assert(h2->req0->t_window > 0);
                w = h2_win_limit(r2, h2);
                if (w > wanted)
                        w = wanted;
                h2_win_charge(r2, h2, w);
                assert (w > 0);
        }
        r2->t_winupd = 0;
        Lck_Unlock(&h2->sess->mtx);
        return (w);
}

/*
 * This is the per-stream frame sender.
 * XXX: priority
 */

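/*
 * Sends one logical frame for the stream holding the send lease,
 * splitting payloads larger than the peer's max_frame_size into a
 * sequence of frames joined by the frame type's continuation, and
 * charging the flow control window for window-respecting frames.
 */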
static void
h2_send(struct worker *wrk, struct h2_req *r2, h2_frame ftyp, uint8_t flags,
    uint32_t len, const void *ptr, uint64_t *counter)
{
        struct h2_sess *h2;
        uint32_t mfs, tf;
        const char *p;
        uint8_t final_flags;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        h2 = r2->h2sess;
        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        assert(len == 0 || ptr != NULL);
        AN(counter);

        AN(H2_SEND_HELD(h2, r2));

        if (h2_errcheck(r2, h2) != NULL)
                return;

        AN(ftyp);
        AZ(flags & ~(ftyp->flags));
        if (r2->stream == 0)
                AZ(ftyp->act_szero);
        else
                AZ(ftyp->act_snonzero);

        Lck_Lock(&h2->sess->mtx);
        mfs = h2->remote_settings.max_frame_size;
        if (r2->counted && (
            (ftyp == H2_F_HEADERS && (flags & H2FF_HEADERS_END_STREAM)) ||
            (ftyp == H2_F_DATA && (flags & H2FF_DATA_END_STREAM)) ||
            ftyp == H2_F_RST_STREAM
            )) {
                assert(h2->open_streams > 0);
                h2->open_streams--;
                r2->counted = 0;
        }
        Lck_Unlock(&h2->sess->mtx);

        if (ftyp->respect_window) {
                tf = h2_do_window(wrk, r2, h2, (len > mfs) ? mfs : len);
                if (h2_errcheck(r2, h2) != NULL)
                        return;
                AN(H2_SEND_HELD(h2, r2));
        } else
                tf = mfs;

        if (len <= tf) {
                H2_Send_Frame(wrk, h2, ftyp, flags, len, r2->stream, ptr);
                *counter += len;
        } else {
                AN(ptr);
                p = ptr;
                final_flags = ftyp->final_flags & flags;
                flags &= ~ftyp->final_flags;
                do {
                        AN(ftyp->continuation);
                        if (!ftyp->respect_window)
                                tf = mfs;
                        if (ftyp->respect_window && p != ptr) {
                                tf = h2_do_window(wrk, r2, h2,
                                    (len > mfs) ? mfs : len);
                                if (h2_errcheck(r2, h2) != NULL)
                                        return;
                                AN(H2_SEND_HELD(h2, r2));
                        }
                        if (tf < len) {
                                H2_Send_Frame(wrk, h2, ftyp,
                                    flags, tf, r2->stream, p);
                        } else {
                                if (ftyp->respect_window)
                                        assert(tf == len);
                                tf = len;
                                H2_Send_Frame(wrk, h2, ftyp, final_flags, tf,
                                    r2->stream, p);
                                flags = 0;
                        }
                        p += tf;
                        len -= tf;
                        *counter += tf;
                        ftyp = ftyp->continuation;
                        flags &= ftyp->flags;
                        final_flags &= ftyp->flags;
                } while (h2->error == NULL && len > 0);
        }
}

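/*
 * Send an RST_STREAM frame carrying h2e's error code for the given
 * stream; the caller must hold the send lease.
 */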
void
H2_Send_RST(struct worker *wrk, struct h2_sess *h2, const struct h2_req *r2,
    uint32_t stream, h2_error h2e)
{
        char b[4];

        CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
        CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
        AN(H2_SEND_HELD(h2, r2));
        AN(h2e);

        H2S_Lock_VSLb(h2, SLT_Debug, "H2: stream %u: %s", stream, h2e->txt);
        vbe32enc(b, h2e->val);

        H2_Send_Frame(wrk, h2, H2_F_RST_STREAM, 0, sizeof b, stream, b);
}

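/*
 * Top level frame sender: wraps h2_send() and, if the stream ended up
 * with an error matching H2SE_CANCEL, follows up with an RST_STREAM.
 */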
void
H2_Send(struct worker *wrk, struct h2_req *r2, h2_frame ftyp, uint8_t flags,
    uint32_t len, const void *ptr, uint64_t *counter)
{
        uint64_t dummy_counter = 0;
        h2_error h2e;

        if (counter == NULL)
                counter = &dummy_counter;

        h2_send(wrk, r2, ftyp, flags, len, ptr, counter);

        h2e = h2_errcheck(r2, r2->h2sess);
        if (H2_ERROR_MATCH(h2e, H2SE_CANCEL))
                H2_Send_RST(wrk, r2->h2sess, r2, r2->stream, h2e);
}