| | varnish-cache/bin/varnishd/http1/cache_http1_line.c |
| 0 |
|
/*- |
| 1 |
|
* Copyright (c) 2006 Verdens Gang AS |
| 2 |
|
* Copyright (c) 2006-2011 Varnish Software AS |
| 3 |
|
* All rights reserved. |
| 4 |
|
* |
| 5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
| 6 |
|
* |
| 7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
| 8 |
|
* |
| 9 |
|
* Redistribution and use in source and binary forms, with or without |
| 10 |
|
* modification, are permitted provided that the following conditions |
| 11 |
|
* are met: |
| 12 |
|
* 1. Redistributions of source code must retain the above copyright |
| 13 |
|
* notice, this list of conditions and the following disclaimer. |
| 14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
| 15 |
|
* notice, this list of conditions and the following disclaimer in the |
| 16 |
|
* documentation and/or other materials provided with the distribution. |
| 17 |
|
* |
| 18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
| 22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 28 |
|
* SUCH DAMAGE. |
| 29 |
|
* |
| 30 |
|
* Write data to fd |
| 31 |
|
* We try to use writev() if possible in order to minimize number of |
| 32 |
|
* syscalls made and packets sent. It also just might allow the worker |
| 33 |
|
* thread to complete the request without holding stuff locked. |
| 34 |
|
* |
| 35 |
|
* XXX: chunked header (generated in Flush) and Tail (EndChunk) |
| 36 |
|
* are not accounted by means of the size_t returned. Obvious ideas: |
| 37 |
|
* - add size_t return value to Flush and EndChunk |
| 38 |
|
* - base accounting on (struct v1l).cnt |
| 39 |
|
*/ |
| 40 |
|
|
| 41 |
|
#include "config.h" |
| 42 |
|
|
| 43 |
|
#include <sys/uio.h> |
| 44 |
|
#include "cache/cache_varnishd.h" |
| 45 |
|
#include "cache/cache_filter.h" |
| 46 |
|
|
| 47 |
|
#include <stdio.h> |
| 48 |
|
|
| 49 |
|
#include "cache_http1.h" |
| 50 |
|
#include "vtim.h" |
| 51 |
|
|
| 52 |
|
/*--------------------------------------------------------------------*/ |
| 53 |
|
|
| 54 |
|
/*
 * State for a single HTTP/1 write line: an iovec array carved out of a
 * workspace, plus accounting for plain and chunked-encoding output.
 */
struct v1l {
	unsigned		magic;
#define V1L_MAGIC		0x2f2142e5
	int			*wfd;		/* pointer to the fd we write to */
	stream_close_t		werr;		/* valid after V1L_Flush() */
	struct iovec		*iov;		/* iovec array (workspace memory) */
	int			siov;		/* capacity of iov[] */
	int			niov;		/* next free slot in iov[] */
	size_t			liov;		/* total bytes currently queued */
	size_t			cliov;		/* bytes queued in current chunk */
	int			ciov;		/* Chunked header marker */
	vtim_real		deadline;	/* absolute total-send timeout */
	struct vsl_log		*vsl;
	uint64_t		cnt;		/* Flushed byte count */
	struct ws		*ws;		/* workspace iov[] was reserved on */
	uintptr_t		ws_snap;	/* rollback point, 0 = no rollback */
	void			**vdp_priv;	/* VDP priv slot to clear on close */
};
| 72 |
|
|
| 73 |
|
/*-------------------------------------------------------------------- |
| 74 |
|
* for niov == 0, reserve the ws for max number of iovs |
| 75 |
|
* otherwise, up to niov |
| 76 |
|
*/ |
| 77 |
|
|
| 78 |
|
struct v1l * |
| 79 |
212701 |
V1L_Open(struct ws *ws, int *fd, struct vsl_log *vsl, |
| 80 |
|
vtim_real deadline, unsigned niov) |
| 81 |
|
{ |
| 82 |
|
struct v1l *v1l; |
| 83 |
|
unsigned u; |
| 84 |
|
uintptr_t ws_snap; |
| 85 |
|
size_t sz; |
| 86 |
|
|
| 87 |
212701 |
if (WS_Overflowed(ws)) |
| 88 |
0 |
return (NULL); |
| 89 |
|
|
| 90 |
212701 |
if (niov != 0) |
| 91 |
126157 |
assert(niov >= 3); |
| 92 |
|
|
| 93 |
212701 |
ws_snap = WS_Snapshot(ws); |
| 94 |
|
|
| 95 |
212701 |
v1l = WS_Alloc(ws, sizeof *v1l); |
| 96 |
212701 |
if (v1l == NULL) |
| 97 |
40 |
return (NULL); |
| 98 |
212661 |
INIT_OBJ(v1l, V1L_MAGIC); |
| 99 |
|
|
| 100 |
212661 |
v1l->ws = ws; |
| 101 |
212661 |
v1l->ws_snap = ws_snap; |
| 102 |
|
|
| 103 |
212661 |
u = WS_ReserveLumps(ws, sizeof(struct iovec)); |
| 104 |
212661 |
if (u < 3) { |
| 105 |
|
/* Must have at least 3 in case of chunked encoding */ |
| 106 |
0 |
WS_Release(ws, 0); |
| 107 |
0 |
WS_MarkOverflow(ws); |
| 108 |
0 |
return (NULL); |
| 109 |
|
} |
| 110 |
212661 |
if (u > IOV_MAX) |
| 111 |
2880 |
u = IOV_MAX; |
| 112 |
212661 |
if (niov != 0 && u > niov) |
| 113 |
123115 |
u = niov; |
| 114 |
212661 |
v1l->iov = WS_Reservation(ws); |
| 115 |
212661 |
v1l->siov = (int)u; |
| 116 |
212661 |
v1l->ciov = (int)u; |
| 117 |
212661 |
v1l->wfd = fd; |
| 118 |
212661 |
v1l->deadline = deadline; |
| 119 |
212661 |
v1l->vsl = vsl; |
| 120 |
212661 |
v1l->werr = SC_NULL; |
| 121 |
|
|
| 122 |
212661 |
sz = u * sizeof(struct iovec); |
| 123 |
212661 |
assert(sz < UINT_MAX); |
| 124 |
212661 |
WS_Release(ws, (unsigned)sz); |
| 125 |
212661 |
return (v1l); |
| 126 |
212701 |
} |
| 127 |
|
|
| 128 |
|
/*
 * Disable the workspace rollback normally performed by V1L_Close():
 * a zero snapshot makes V1L_Close() skip WS_Rollback().
 */
void
V1L_NoRollback(struct v1l *v1l)
{

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
	v1l->ws_snap = 0;
}
| 135 |
|
|
| 136 |
|
stream_close_t |
| 137 |
212689 |
V1L_Close(struct v1l **v1lp, uint64_t *cnt) |
| 138 |
|
{ |
| 139 |
|
struct v1l *v1l; |
| 140 |
|
struct ws *ws; |
| 141 |
|
uintptr_t ws_snap; |
| 142 |
|
stream_close_t sc; |
| 143 |
|
|
| 144 |
212689 |
AN(cnt); |
| 145 |
212689 |
TAKE_OBJ_NOTNULL(v1l, v1lp, V1L_MAGIC); |
| 146 |
212689 |
if (v1l->vdp_priv != NULL) { |
| 147 |
167212 |
assert(*v1l->vdp_priv == v1l); |
| 148 |
167212 |
*v1l->vdp_priv = NULL; |
| 149 |
167212 |
} |
| 150 |
212689 |
sc = V1L_Flush(v1l); |
| 151 |
212689 |
*cnt = v1l->cnt; |
| 152 |
212689 |
ws = v1l->ws; |
| 153 |
212689 |
ws_snap = v1l->ws_snap; |
| 154 |
212689 |
ZERO_OBJ(v1l, sizeof *v1l); |
| 155 |
212689 |
if (ws_snap != 0) |
| 156 |
209807 |
WS_Rollback(ws, ws_snap); |
| 157 |
212689 |
return (sc); |
| 158 |
|
} |
| 159 |
|
|
| 160 |
|
/*
 * Drop the first abytes bytes (already written by a partial writev())
 * from the front of the iovec array, shifting the remainder down with
 * memmove() so the next writev() resumes exactly where the last one
 * stopped.  The iov containing the cutoff is trimmed in place.
 */
static void
v1l_prune(struct v1l *v1l, ssize_t abytes)
{
	size_t used = 0;
	size_t sz, bytes, used_here;
	int j;

	assert(abytes > 0);
	bytes = (size_t)abytes;

	for (j = 0; j < v1l->niov; j++) {
		if (used + v1l->iov[j].iov_len > bytes) {
			/* Cutoff is in this iov */
			used_here = bytes - used;
			v1l->iov[j].iov_len -= used_here;
			v1l->iov[j].iov_base =
			    (char*)v1l->iov[j].iov_base + used_here;
			sz = (unsigned)v1l->niov - (unsigned)j;
			sz *= sizeof(struct iovec);
			memmove(v1l->iov, &v1l->iov[j], sz);
			v1l->niov -= j;
			assert(v1l->liov >= bytes);
			v1l->liov -= bytes;
			return;
		}
		used += v1l->iov[j].iov_len;
	}
	/* Only reached if everything was written, which the caller
	 * already handled — so nothing may remain queued. */
	AZ(v1l->liov);
}
| 189 |
|
|
| 190 |
|
/*
 * Write all queued iovecs to the fd with writev(), retrying on idle
 * (EWOULDBLOCK) timeouts until the total-send deadline passes.
 *
 * In chunked mode (ciov < siov) the chunk header is formatted into the
 * local cbuf and patched into the reserved ciov slot, and a CRLF tail
 * is appended; cbuf need only live until writev() below.  If the chunk
 * is empty, the reserved slot is made a zero-length iov instead.
 *
 * On error the close reason is latched into v1l->werr (SC_REM_CLOSE for
 * EPIPE, SC_TX_ERROR otherwise) — once set, later flushes are no-ops.
 * The iov state is always reset, re-reserving the chunk-header slot
 * when still in chunked mode.  Returns v1l->werr.
 */
stream_close_t
V1L_Flush(struct v1l *v1l)
{
	ssize_t i;
	size_t sz;
	int err;
	char cbuf[32];

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);
	CHECK_OBJ_NOTNULL(v1l->werr, STREAM_CLOSE_MAGIC);
	AN(v1l->wfd);

	assert(v1l->niov <= v1l->siov);

	if (*v1l->wfd >= 0 && v1l->liov > 0 && v1l->werr == SC_NULL) {
		if (v1l->ciov < v1l->siov && v1l->cliov > 0) {
			/* Add chunk head & tail */
			bprintf(cbuf, "00%zx\r\n", v1l->cliov);
			sz = strlen(cbuf);
			v1l->iov[v1l->ciov].iov_base = cbuf;
			v1l->iov[v1l->ciov].iov_len = sz;
			v1l->liov += sz;

			/* This is OK, because siov was --'ed */
			v1l->iov[v1l->niov].iov_base = cbuf + sz - 2;
			v1l->iov[v1l->niov++].iov_len = 2;
			v1l->liov += 2;
		} else if (v1l->ciov < v1l->siov) {
			/* empty chunk: neutralize the reserved header slot */
			v1l->iov[v1l->ciov].iov_base = cbuf;
			v1l->iov[v1l->ciov].iov_len = 0;
		}

		i = 0;
		err = 0;
		do {
			if (VTIM_real() > v1l->deadline) {
				VSLb(v1l->vsl, SLT_Debug,
				    "Hit total send timeout, "
				    "wrote = %zd/%zd; not retrying",
				    i, v1l->liov);
				i = -1;
				break;
			}

			i = writev(*v1l->wfd, v1l->iov, v1l->niov);
			if (i > 0) {
				v1l->cnt += (size_t)i;
				if ((size_t)i == v1l->liov)
					break;	/* everything sent */
			}

			/* we hit a timeout, and some data may have been sent:
			 * Remove sent data from start of I/O vector, then retry
			 *
			 * XXX: Add a "minimum sent data per timeout counter to
			 * prevent slowloris attacks
			 */

			err = errno;

			if (err == EWOULDBLOCK) {
				VSLb(v1l->vsl, SLT_Debug,
				    "Hit idle send timeout, "
				    "wrote = %zd/%zd; retrying",
				    i, v1l->liov);
			}

			if (i > 0)
				v1l_prune(v1l, i);
		} while (i > 0 || err == EWOULDBLOCK);

		if (i <= 0) {
			/* latch the error; werr may only be set once */
			VSLb(v1l->vsl, SLT_Debug,
			    "Write error, retval = %zd, len = %zd, errno = %s",
			    i, v1l->liov, VAS_errtxt(err));
			assert(v1l->werr == SC_NULL);
			if (err == EPIPE)
				v1l->werr = SC_REM_CLOSE;
			else
				v1l->werr = SC_TX_ERROR;
			errno = err;
		}
	}
	v1l->liov = 0;
	v1l->cliov = 0;
	v1l->niov = 0;
	/* still in chunked mode: re-reserve slot 0 for the next header */
	if (v1l->ciov < v1l->siov)
		v1l->ciov = v1l->niov++;
	CHECK_OBJ_NOTNULL(v1l->werr, STREAM_CLOSE_MAGIC);
	return (v1l->werr);
}
| 281 |
|
|
| 282 |
|
size_t |
| 283 |
5299534 |
V1L_Write(struct v1l *v1l, const void *ptr, ssize_t alen) |
| 284 |
|
{ |
| 285 |
5299534 |
size_t len = 0; |
| 286 |
|
|
| 287 |
5299534 |
CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC); |
| 288 |
5299534 |
AN(v1l->wfd); |
| 289 |
5299534 |
if (alen == 0 || *v1l->wfd < 0) |
| 290 |
906 |
return (0); |
| 291 |
5299534 |
if (alen > 0) |
| 292 |
2876513 |
len = (size_t)alen; |
| 293 |
2423021 |
else if (alen == -1) |
| 294 |
2423021 |
len = strlen(ptr); |
| 295 |
|
else |
| 296 |
0 |
WRONG("alen"); |
| 297 |
|
|
| 298 |
5299534 |
assert(v1l->niov < v1l->siov); |
| 299 |
5299534 |
v1l->iov[v1l->niov].iov_base = TRUST_ME(ptr); |
| 300 |
5299534 |
v1l->iov[v1l->niov].iov_len = len; |
| 301 |
5299534 |
v1l->liov += len; |
| 302 |
5299534 |
v1l->niov++; |
| 303 |
5299534 |
v1l->cliov += len; |
| 304 |
5299534 |
if (v1l->niov >= v1l->siov) { |
| 305 |
4400 |
(void)V1L_Flush(v1l); |
| 306 |
4400 |
VSC_C_main->http1_iovs_flush++; |
| 307 |
4400 |
} |
| 308 |
5299534 |
return (len); |
| 309 |
5299534 |
} |
| 310 |
|
|
| 311 |
|
/*
 * Enter chunked-encoding mode.
 *
 * Decrementing siov keeps the last iovec slot in reserve for the chunk
 * tail that V1L_Flush() appends; the slot recorded in ciov holds the
 * chunk header, which V1L_Flush() formats once the chunk size is known.
 */
void
V1L_Chunked(struct v1l *v1l)
{

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);

	/* ciov == siov means we were not already in chunked mode */
	assert(v1l->ciov == v1l->siov);
	assert(v1l->siov >= 3);
	/*
	 * If there is no space for chunked header, a chunk of data and
	 * a chunk tail, we might as well flush right away.
	 */
	if (v1l->niov + 3 >= v1l->siov) {
		(void)V1L_Flush(v1l);
		VSC_C_main->http1_iovs_flush++;
	}
	v1l->siov--;
	v1l->ciov = v1l->niov++;
	v1l->cliov = 0;
	assert(v1l->ciov < v1l->siov);
	assert(v1l->niov < v1l->siov);
}
| 333 |
|
|
| 334 |
|
/* |
| 335 |
|
* XXX: It is not worth the complexity to attempt to get the |
| 336 |
|
* XXX: end of chunk into the V1L_Flush(), because most of the time |
| 337 |
|
* XXX: if not always, that is a no-op anyway, because the calling |
| 338 |
|
* XXX: code already called V1L_Flush() to release local storage. |
| 339 |
|
*/ |
| 340 |
|
|
| 341 |
|
/*
 * Leave chunked-encoding mode: flush the final data chunk, return the
 * tail slot reserved by V1L_Chunked() (siov++), mark chunking off
 * (ciov == siov) and queue the terminating zero-length chunk.
 */
void
V1L_EndChunk(struct v1l *v1l)
{

	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);

	/* must currently be in chunked mode */
	assert(v1l->ciov < v1l->siov);
	(void)V1L_Flush(v1l);
	v1l->siov++;
	v1l->ciov = v1l->siov;
	v1l->niov = 0;
	v1l->cliov = 0;
	(void)V1L_Write(v1l, "0\r\n\r\n", -1);
}
| 355 |
|
|
| 356 |
|
/*-------------------------------------------------------------------- |
| 357 |
|
* VDP using V1L |
| 358 |
|
*/ |
| 359 |
|
|
| 360 |
|
/* remember priv pointer for V1L_Close() to clear */ |
| 361 |
|
static int v_matchproto_(vdp_init_f) |
| 362 |
165943 |
v1l_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) |
| 363 |
|
{ |
| 364 |
|
struct v1l *v1l; |
| 365 |
|
|
| 366 |
165943 |
(void) ctx; |
| 367 |
165943 |
(void) vdc; |
| 368 |
165943 |
AN(priv); |
| 369 |
165943 |
CAST_OBJ_NOTNULL(v1l, *priv, V1L_MAGIC); |
| 370 |
|
|
| 371 |
165943 |
v1l->vdp_priv = priv; |
| 372 |
165943 |
return (0); |
| 373 |
|
} |
| 374 |
|
|
| 375 |
|
static int v_matchproto_(vdp_bytes_f) |
| 376 |
724503 |
v1l_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
| 377 |
|
const void *ptr, ssize_t len) |
| 378 |
|
{ |
| 379 |
724503 |
size_t wl = 0; |
| 380 |
|
|
| 381 |
724503 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
| 382 |
724503 |
AN(priv); |
| 383 |
|
|
| 384 |
724503 |
AZ(vdc->nxt); /* always at the bottom of the pile */ |
| 385 |
|
|
| 386 |
724503 |
if (len > 0) |
| 387 |
669511 |
wl = V1L_Write(*priv, ptr, len); |
| 388 |
724503 |
if (act > VDP_NULL && V1L_Flush(*priv) != SC_NULL) |
| 389 |
477 |
return (-1); |
| 390 |
724026 |
if ((size_t)len != wl) |
| 391 |
0 |
return (-1); |
| 392 |
724026 |
return (0); |
| 393 |
724503 |
} |
| 394 |
|
|
| 395 |
|
/*-------------------------------------------------------------------- |
| 396 |
|
* VDPIO using V1L |
| 397 |
|
* |
| 398 |
|
* this is deliverately half-baked to reduce work in progress while heading |
| 399 |
|
* towards VAI/VDPIO: we update the v1l with the scarab, which we |
| 400 |
|
* return unmodified. |
| 401 |
|
* |
| 402 |
|
*/ |
| 403 |
|
|
| 404 |
|
/* remember priv pointer for V1L_Close() to clear */ |
| 405 |
|
static int v_matchproto_(vpio_init_f) |
| 406 |
1280 |
v1l_io_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) |
| 407 |
|
{ |
| 408 |
|
struct v1l *v1l; |
| 409 |
|
|
| 410 |
1280 |
(void) ctx; |
| 411 |
1280 |
(void) vdc; |
| 412 |
1280 |
AN(priv); |
| 413 |
|
|
| 414 |
1280 |
CAST_OBJ_NOTNULL(v1l, *priv, V1L_MAGIC); |
| 415 |
|
|
| 416 |
1280 |
v1l->vdp_priv = priv; |
| 417 |
1280 |
return (capacity); |
| 418 |
|
} |
| 419 |
|
|
| 420 |
|
/* upgrading an existing V1L VDP to VDPIO is the same as a fresh init */
static int v_matchproto_(vpio_init_f)
v1l_io_upgrade(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity)
{
	return (v1l_io_init(ctx, vdc, priv, capacity));
}
| 425 |
|
|
| 426 |
|
/* |
| 427 |
|
* API note |
| 428 |
|
* |
| 429 |
|
* this VDP is special in that it does not transform data, but prepares |
| 430 |
|
* the write. From the perspective of VDPIO, its current state is only |
| 431 |
|
* transitional. |
| 432 |
|
* |
| 433 |
|
* Because the VDP prepares the actual writes, but the caller needs |
| 434 |
|
* to return the scarab's leases, the caller in this case is |
| 435 |
|
* required to empty the scarab after V1L_Flush()'ing. |
| 436 |
|
*/ |
| 437 |
|
|
| 438 |
|
/*
 * VDPIO lease callback: pull a scarab of leases from upstream and queue
 * each viov for writing.  The scarab is returned unmodified; per the
 * API note above, the caller must flush and empty it before calling
 * again — hence the assert that it arrives empty.
 */
static int v_matchproto_(vdpio_lease_f)
v1l_io_lease(struct vdp_ctx *vdc, struct vdp_entry *this, struct vscarab *scarab)
{
	struct v1l *v1l;
	struct viov *v;
	int r;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(this, VDP_ENTRY_MAGIC);
	CAST_OBJ_NOTNULL(v1l, this->priv, V1L_MAGIC);
	VSCARAB_CHECK(scarab);
	AZ(scarab->used);	// see note above
	this->calls++;
	r = vdpio_pull(vdc, this, scarab);
	if (r < 0)
		return (r);	/* upstream error, nothing queued */
	VSCARAB_FOREACH(v, scarab)
		this->bytes_in += V1L_Write(v1l, v->iov.iov_base, v->iov.iov_len);
	return (r);
}
| 458 |
|
|
| 459 |
|
/* The V1L delivery processor: classic bytes interface plus the
 * transitional VDPIO entry points described above. */
const struct vdp * const VDP_v1l = &(struct vdp){
	.name =		"V1B",
	.init =		v1l_init,
	.bytes =	v1l_bytes,

	.io_init =	v1l_io_init,
	.io_upgrade =	v1l_io_upgrade,
	.io_lease =	v1l_io_lease,
};