varnish-cache/bin/varnishd/cache/cache_deliver_proc.c

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"

void
VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc)
{
	struct vdp_entry *vde;

	if (PAN_dump_struct(vsb, vdc, VDP_CTX_MAGIC, "vdc"))
		return;
	VSB_printf(vsb, "nxt = %p,\n", vdc->nxt);
	VSB_printf(vsb, "retval = %d,\n", vdc->retval);

	if (!VTAILQ_EMPTY(&vdc->vdp)) {
		VSB_cat(vsb, "filters = {\n");
		VSB_indent(vsb, 2);
		VTAILQ_FOREACH(vde, &vdc->vdp, list)
			VSB_printf(vsb, "%s = %p { priv = %p }\n",
			    vde->vdp->name, vde, vde->priv);
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "},\n");
	}

	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}

/*
 * Ensure that transports have called VDP_Close()
 * to avoid leaks in VDPs
 */
void
VDP_Fini(const struct vdp_ctx *vdc)
{
	assert(VTAILQ_EMPTY(&vdc->vdp));
}

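/*
 * Illustrative sketch (not part of the original file): the transport
 * ordering implied by the comment above VDP_Fini() -- VDP_Close() must
 * have popped and finished every pushed entry before VDP_Fini() checks
 * for leaks.  "bytes_sent" is a hypothetical local.
 */
#if 0
	uint64_t bytes_sent;

	bytes_sent = VDP_Close(vdc, oc, boc);	/* releases every vdp_entry */
	VDP_Fini(vdc);				/* asserts the chain is empty */
#endif
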
void
VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl,
    const struct req *req, const struct busyobj *bo, intmax_t *clen)
{
	AN(vdc);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(vsl);

	AN(clen);

	assert((req ? 1 : 0) ^ (bo ? 1 : 0));

	AN(clen);
	assert(*clen >= -1);

	INIT_OBJ(vdc, VDP_CTX_MAGIC);
	VTAILQ_INIT(&vdc->vdp);
	vdc->wrk = wrk;
	vdc->vsl = vsl;
	vdc->clen = clen;

	if (req != NULL) {
		CHECK_OBJ(req, REQ_MAGIC);
		vdc->oc = req->objcore;
		vdc->hp = req->resp;
	}
	else {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		vdc->oc = bo->bereq_body;
		vdc->hp = bo->bereq;
	}
}

/* VDP_bytes
 *
 * Pushes len bytes at ptr down the delivery processor list.
 *
 * This function picks and calls the next delivery processor from the
 * list. The return value is the return value of the delivery
 * processor. Upon seeing a non-zero return value, the lowest value
 * observed is latched in ->retval and all subsequent calls to
 * VDP_bytes will return that value directly without calling the next
 * processor.
 *
 * VDP_END marks the end of successful processing, it is issued by
 * VDP_DeliverObj() and may also be sent downstream by processors ending
 * the stream (for return value != 0).
 *
 * VDP_END must at most be received once per processor, so any VDP sending it
 * downstream must itself not forward it a second time.
 *
 * Valid return values (of VDP_bytes and any VDP function):
 * r < 0:  Error, breaks out early on an error condition
 * r == 0: Continue
 * r > 0:  Stop, breaks out early without error condition
 */

int
VDP_bytes(struct vdp_ctx *vdc, enum vdp_action act,
    const void *ptr, ssize_t len)
{
	int retval;
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	if (vdc->retval)
		return (vdc->retval);
	vdpe = vdc->nxt;
	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);

	/* at most one VDP_END call */
	assert(vdpe->end == VDP_NULL);

	if (act == VDP_NULL)
		assert(len > 0);
	else if (act == VDP_END)
		vdpe->end = VDP_END;
	else
		assert(act == VDP_FLUSH);

	/* Call the present layer, while pointing to the next layer down */
	vdc->nxt = VTAILQ_NEXT(vdpe, list);
	vdpe->calls++;
	vdc->bytes_done = len;
	retval = vdpe->vdp->bytes(vdc, act, &vdpe->priv, ptr, len);
	vdpe->bytes_in += vdc->bytes_done;
	if (retval && (vdc->retval == 0 || retval < vdc->retval))
		vdc->retval = retval; /* Latch error value */
	vdc->nxt = vdpe;
	return (vdc->retval);
}

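/*
 * Illustrative sketch (not part of the original file): a minimal
 * pass-through delivery processor wired to the contract documented
 * above.  It assumes the vdp_bytes_f prototype from cache_filter.h;
 * "xyzzy" is a hypothetical name.  A transport or VMOD would attach it
 * with VDP_Push(ctx, vdc, ws, &xyzzy_vdp, NULL).
 */
#if 0
static int v_matchproto_(vdp_bytes_f)
xyzzy_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	(void)priv;

	/* Forward unchanged; the return value propagates per the rules
	 * above (negative = error, zero = continue, positive = stop). */
	return (VDP_bytes(vdc, act, ptr, len));
}

static const struct vdp xyzzy_vdp = {
	.name =		"xyzzy",
	.bytes =	xyzzy_bytes,
};
#endif
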
int
VDP_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	if (vdc->retval)
		return (vdc->retval);

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		AZ(vdc->retval);
		vdc->retval = -1;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	AZ(vdc->retval);
	if (vdpe->vdp->init != NULL)
		vdc->retval = vdpe->vdp->init(ctx, vdc, &vdpe->priv);
	vdc->oc = NULL;

	if (vdc->retval > 0) {
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		vdc->retval = 0;
	}
	else if (vdc->retval == 0)
		AN(vdp->bytes);
	return (vdc->retval);
}

uint64_t
VDP_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
	struct vdp_entry *vdpe;
	uint64_t rv = 0;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

	while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) {
		CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
		rv = vdpe->bytes_in;
		VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
		    (uintmax_t)vdpe->calls, (uintmax_t)rv);
		if (vdpe->vdp->fini != NULL)
			AZ(vdpe->vdp->fini(vdc, &vdpe->priv));
		AZ(vdpe->priv);
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
#ifdef VDP_PEDANTIC_ARMED
		// enable when we are confident to get VDP_END right
		if (vdc->nxt == NULL && vdc->retval >= 0)
			assert(vdpe->end == VDP_END);
#endif
	}
	if (oc != NULL)
		HSH_Cancel(vdc->wrk, oc, boc);
	return (rv);
}

/*--------------------------------------------------------------------*/

/*
 * Push a VDPIO vdp. This can only be used with only vdpio-enabled VDPs or
 * after a successful upgrade
 */
int
VDPIO_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;
	int r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	if (vdc->retval < 0)
		return (vdc->retval);

	AN(vdp->io_init);

	// the first VDP (which leases from storage) only gets the minimum
	// capacity requirement of 1
	if (vdc->retval == 0) {
		assert(VTAILQ_EMPTY(&vdc->vdp));
		vdc->retval = 1;
	}

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDPIO_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		vdc->retval = -ENOMEM;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	assert(vdc->retval > 0);
	if (vdpe->vdp->io_init != NULL) {
		r = vdpe->vdp->io_init(ctx, vdc, &vdpe->priv, vdc->retval);
		if (r <= 0) {
			VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
			vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		}
		else
			AN(vdp->io_lease);
		if (r != 0)
			vdc->retval = r;
	}
	vdc->oc = NULL;
	return (vdc->retval);
}

/*
 * upgrade an already initialized VDP filter chain to VDPIO, if possible
 * returns:
 * > 0          cap
 * -ENOTSUP     io_upgrade missing for at least one filter
 * vdc->retval  if < 0
 */
int
VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc)
{
	struct vdp_entry *vdpe;
	int cap, r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	VTAILQ_FOREACH(vdpe, &vdc->vdp, list)
		if (vdpe->vdp->io_upgrade == NULL)
			return (-ENOTSUP);

	if (vdc->retval < 0)
		return (vdc->retval);

	// minimum capacity requirement for the first filter (after storage)
	r = cap = 1;
	VTAILQ_FOREACH(vdpe, &vdc->vdp, list) {
		r = vdpe->vdp->io_upgrade(ctx, vdc, &vdpe->priv, cap);
		if (DO_DEBUG(DBG_PROCESSORS)) {
			VSLb(vdc->vsl, SLT_Debug, "VDPIO_Upgrade "
			    "%d = %s(cap = %d)",
			    r, vdpe->vdp->name, cap);
		}
		if (r < 0)
			return ((vdc->retval = r));
		// XXX remove if filter does not want to be pushed?
		assert(r != 0);
		cap = r;
	}
	return ((vdc->retval = r));
}

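/*
 * Illustrative sketch (not part of the original file): how a transport
 * might probe an already initialized chain for VDPIO and fall back to
 * the classic VDP_bytes path, following the return values documented
 * above.  "deliver_with_vdpio" and "deliver_with_vdp" are hypothetical
 * helpers.
 */
#if 0
	int cap;

	cap = VDPIO_Upgrade(ctx, vdc);
	if (cap > 0)
		deliver_with_vdpio(ctx, vdc, oc, cap);	/* lease-based I/O */
	else
		deliver_with_vdp(vdc, oc);	/* -ENOTSUP or latched error */
#endif
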
uint64_t
VDPIO_Close1(struct vdp_ctx *vdc, struct vdp_entry *vdpe)
{
	uint64_t rv;

	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
	rv = vdpe->bytes_in;
	VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
	    (uintmax_t)vdpe->calls, (uintmax_t)rv);
	if (vdpe->vdp->io_fini != NULL)
		vdpe->vdp->io_fini(vdc, &vdpe->priv);
	AZ(vdpe->priv);
	VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
	return (rv);
}

uint64_t
VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
	struct vdp_entry *vdpe;
	uint64_t rv = 0;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

	while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL)
		rv = VDPIO_Close1(vdc, vdpe);

	if (oc != NULL)
		HSH_Cancel(vdc->wrk, oc, boc);
	return (rv);
}

/*
 * ============================================================
 * VDPIO helpers: VAI management
 *
 * Transports should not need to talk to the VAI Object interface directly,
 * because its state is kept in the vdp_ctx
 *
 * So we wrap init, return and fini
 */

// return true if error
int
VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	VSCARET_CHECK_NOTNULL(scaret);
	AN(scaret->capacity);
	AZ(scaret->used);

	AZ(vdc->vai_hdl);
	vdc->vai_hdl = ObjVAIinit(vdc->wrk, oc, ws, notify_cb, notify_priv);
	if (vdc->vai_hdl == NULL)
		return (1);
	vdc->scaret = scaret;
	return (0);
}

// return leases stashed in scaret
void
VDPIO_Return(const struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	ObjVAIreturn(vdc->wrk, vdc->vai_hdl, vdc->scaret);
}

void
VDPIO_Fini(struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	VDPIO_Return(vdc);
	ObjVAIfini(vdc->wrk, &vdc->vai_hdl);
}

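/*
 * Illustrative sketch (not part of the original file): the lifecycle a
 * transport would follow with the wrappers above.  It assumes the caller
 * already owns a suitably sized struct vscaret and a vai_notify_cb; the
 * delivery loop itself (leasing through the filters' io_lease and
 * stashing finished leases in the scaret) is elided.
 */
#if 0
	if (VDPIO_Init(vdc, oc, ws, notify_cb, notify_priv, scaret))
		return (-1);	/* ObjVAIinit() failed, no VAI handle */

	/* ... deliver, stashing finished leases in scaret ... */

	VDPIO_Return(vdc);	/* hand stashed leases back to storage */
	VDPIO_Fini(vdc);	/* final return, then ObjVAIfini() */
#endif
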
/*--------------------------------------------------------------------*/

int v_matchproto_(objiterate_f)
VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
	enum vdp_action act;

	if (flush == 0)
		act = VDP_NULL;
	else if ((flush & OBJ_ITER_END) != 0)
		act = VDP_END;
	else
		act = VDP_FLUSH;

	return (VDP_bytes(priv, act, ptr, len));
}


int
VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc)
{
	int r, final;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
	AN(vdc->vsl);
	AZ(vdc->oc);
	vdc->hp = NULL;
	vdc->clen = NULL;
	final = oc->flags & OC_F_TRANSIENT ? 1 : 0;
	r = ObjIterate(vdc->wrk, oc, vdc, VDP_ObjIterate, final);
	if (r < 0)
		return (r);
	return (0);
}