varnish-cache/lib/libvarnishapi/vsl_cursor.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "vdef.h"
#include "vas.h"
#include "miniobj.h"
#include "vmb.h"

#include "vqueue.h"
#include "vre.h"
#include "vsl_priv.h"

#include "vapi/vsl.h"
#include "vapi/vsm.h"

#include "vsl_api.h"
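
/*
 * Cursor reading directly from the live VSL log in shared memory (VSM).
 * Holds the mapped fantom, a pointer to the VSL_head, the end of the log
 * area and the position of the next record to visit.
 */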
struct vslc_vsm {
        unsigned                        magic;
#define VSLC_VSM_MAGIC                  0x4D3903A6

        struct VSL_cursor               cursor;

        unsigned                        options;

        struct vsm                      *vsm;
        struct vsm_fantom               vf;

        const struct VSL_head           *head;
        const uint32_t                  *end;
        struct VSLC_ptr                 next;
};

static void
vslc_vsm_delete(const struct VSL_cursor *cursor)
{
        struct vslc_vsm *c;

        AN(cursor);
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
        AZ(VSM_Unmap(c->vsm, &c->vf));
        assert(&c->cursor == cursor);
        FREE_OBJ(c);
}

/*
 * We tolerate the fact that segment_n wraps around eventually: for the default
 * vsl_space of 80MB and 8 segments, each segment is 10MB long, so we wrap
 * roughly after 40 pebibytes (32bit) or 160 yobibytes (64bit) worth of vsl
 * written.
 *
 * The vsm_check would fail if a vslc paused while this amount of data was
 * written.
 */

static enum vsl_check v_matchproto_(vslc_check_f)
vslc_vsm_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
{
        const struct vslc_vsm *c;
        unsigned dist;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
        assert(&c->cursor == cursor);

        if (ptr->ptr == NULL)
                return (vsl_check_e_inval);

        dist = c->head->segment_n - ptr->priv;

        if (dist >= VSL_SEGMENTS - 2)
                /* Too close to continue */
                return (vsl_check_e_inval);
        if (dist >= VSL_SEGMENTS - 4)
                /* Warning level */
                return (vsl_check_warn);
        /* Safe */
        return (vsl_check_valid);
}
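
/*
 * Advance the VSM cursor one record: re-check the distance to the writer,
 * read the next word with a volatile load, handle the end- and wrap-markers,
 * optionally skip SLT__Batch records, and keep the segment counter
 * (next.priv) in step with the byte position.
 */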
static enum vsl_status v_matchproto_(vslc_next_f)
vslc_vsm_next(const struct VSL_cursor *cursor)
{
        struct vslc_vsm *c;
        enum vsl_check i;
        uint32_t t;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
        assert(&c->cursor == cursor);

        while (1) {
                i = vslc_vsm_check(&c->cursor, &c->next);
                if (i < vsl_check_warn) {
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
                                return (vsl_e_abandon);
                        else
                                return (vsl_e_overrun);
                }

                t = *(volatile const uint32_t *)c->next.ptr;
                AN(t);

                if (t == VSL_ENDMARKER) {
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
                                return (vsl_e_abandon);
                        if (c->options & VSL_COPT_TAILSTOP)
                                return (vsl_e_eof);
                        /* No new records available */
                        return (vsl_end);
                }

                /* New data observed. Ensure load ordering with the log
                 * writer. */
                VRMB();

                if (t == VSL_WRAPMARKER) {
                        /* Wrap around not possible at front */
                        assert(c->next.ptr != c->head->log);
                        c->next.ptr = c->head->log;
                        while (c->next.priv % VSL_SEGMENTS)
                                c->next.priv++;
                        continue;
                }

                c->cursor.rec = c->next;
                c->next.ptr = VSL_NEXT(c->next.ptr);

                if (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch) {
                        if (!(c->options & VSL_COPT_BATCH))
                                /* Skip the batch record */
                                continue;
                        /* Next call will point to the first record past
                           the batch */
                        c->next.ptr +=
                            VSL_WORDS(VSL_BATCHLEN(c->cursor.rec.ptr));
                }

                while ((c->next.ptr - c->head->log) / c->head->segsize >
                    c->next.priv % VSL_SEGMENTS)
                        c->next.priv++;

                assert(c->next.ptr >= c->head->log);
                assert(c->next.ptr < c->end);

                return (vsl_more);
        }
}
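
/*
 * (Re)position the cursor.  With VSL_COPT_TAIL it starts in the writer's
 * current segment and fast-forwards to the end of the log; otherwise it
 * starts (VSL_SEGMENTS - 3) segments behind the writer.
 */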
static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_vsm_reset(const struct VSL_cursor *cursor)
{
        struct vslc_vsm *c;
        unsigned u, segment_n;
        enum vsl_status r;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
        assert(&c->cursor == cursor);
        c->cursor.rec.ptr = NULL;

        segment_n = c->head->segment_n;
        /* Make sure offset table is not stale compared to segment_n */
        VRMB();

        if (c->options & VSL_COPT_TAIL) {
                /* Start in the same segment varnishd currently is in and
                   run forward until we see the end */
                u = c->next.priv = segment_n;
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
                c->next.ptr = c->head->log +
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
                do {
                        if (c->head->segment_n - u > 1) {
                                /* Give up if varnishd is moving faster
                                   than us */
                                return (vsl_e_overrun);
                        }
                        r = vslc_vsm_next(&c->cursor);
                } while (r == vsl_more);
                if (r != vsl_end)
                        return (r);
        } else {
                /* Starting (VSL_SEGMENTS - 3) behind varnishd. This way
                 * even if varnishd advances segment_n immediately, we'll
                 * still have a full segment worth of log before the
                 * general constraint of at least 2 segments apart will be
                 * broken.
                 */
                c->next.priv = segment_n - (VSL_SEGMENTS - 3);
                while (c->head->offset[c->next.priv % VSL_SEGMENTS] < 0) {
                        /* seg 0 must be initialized */
                        assert(c->next.priv % VSL_SEGMENTS != 0);
                        c->next.priv++;
                }
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
                c->next.ptr = c->head->log +
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
        }
        assert(c->next.ptr >= c->head->log);
        assert(c->next.ptr < c->end);
        return (vsl_end);
}

static const struct vslc_tbl vslc_vsm_tbl = {
        .magic          = VSLC_TBL_MAGIC,
        .delete         = vslc_vsm_delete,
        .next           = vslc_vsm_next,
        .reset          = vslc_vsm_reset,
        .check          = vslc_vsm_check,
};
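
/*
 * Typical use of the VSM cursor.  A minimal sketch, not compiled here; it
 * assumes the usual libvarnishapi setup, i.e. a struct vsm attached with
 * VSM_New()/VSM_Attach() and a struct VSL_data from VSL_New():
 *
 *      struct VSL_cursor *c;
 *
 *      c = VSL_CursorVSM(vsl, vsm, VSL_COPT_TAIL);
 *      if (c == NULL)
 *              fprintf(stderr, "%s\n", VSL_Error(vsl));
 *      else {
 *              while (VSL_Next(c) == vsl_more)
 *                      (void)VSL_Print(vsl, c, stdout);
 *              VSL_DeleteCursor(c);
 *      }
 *
 * A live client would normally sleep and retry when VSL_Next() returns
 * vsl_end rather than stop, as this sketch does.
 */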
struct VSL_cursor *
VSL_CursorVSM(struct VSL_data *vsl, struct vsm *vsm, unsigned options)
{
        struct vslc_vsm *c;
        struct vsm_fantom vf;
        struct VSL_head *head;
        enum vsl_status r;

        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);

        if (!VSM_Get(vsm, &vf, VSL_CLASS, NULL)) {
                (void)vsl_diag(vsl,
                    "No VSL chunk found (child not started ?)");
                return (NULL);
        }
        if (VSM_Map(vsm, &vf)) {
                (void)vsl_diag(vsl,
                    "VSM_Map(): %s", VSM_Error(vsm));
                return (NULL);
        }
        AN(vf.b);

        head = vf.b;
        if (memcmp(head->marker, VSL_HEAD_MARKER, sizeof head->marker)) {
                AZ(VSM_Unmap(vsm, &vf));
                (void)vsl_diag(vsl, "Not a VSL chunk");
                return (NULL);
        }
        ALLOC_OBJ(c, VSLC_VSM_MAGIC);
        if (c == NULL) {
                AZ(VSM_Unmap(vsm, &vf));
                (void)vsl_diag(vsl, "Out of memory");
                return (NULL);
        }
        c->cursor.priv_tbl = &vslc_vsm_tbl;
        c->cursor.priv_data = c;

        c->options = options;
        c->vsm = vsm;
        c->vf = vf;
        c->head = head;
        c->end = c->head->log + c->head->segsize * VSL_SEGMENTS;
        assert(c->end <= (const uint32_t *)vf.e);

        r = vslc_vsm_reset(&c->cursor);
        if (r != vsl_end) {
                AZ(VSM_Unmap(vsm, &vf));
                (void)vsl_diag(vsl, "Cursor initialization failure (%d)", r);
                FREE_OBJ(c);
                return (NULL);
        }

        return (&c->cursor);
}
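
/*
 * Cursor reading VSL records sequentially from a file descriptor that
 * cannot be mmap()'ed (a pipe or stdin).  Records are read one at a time
 * into a heap buffer that is grown on demand.
 */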
struct vslc_file {
        unsigned                        magic;
#define VSLC_FILE_MAGIC                 0x1D65FFEF

        int                             fd;
        int                             close_fd;
        ssize_t                         buflen;
        uint32_t                        *buf;

        struct VSL_cursor               cursor;

};

static void
vslc_file_delete(const struct VSL_cursor *cursor)
{
        struct vslc_file *c;

        AN(cursor);
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
        assert(&c->cursor == cursor);
        if (c->close_fd)
                (void)close(c->fd);
        if (c->buf != NULL)
                free(c->buf);
        FREE_OBJ(c);
}

/* Read n bytes from fd into buf */
static ssize_t
vslc_file_readn(int fd, void *buf, ssize_t n)
{
        ssize_t t = 0;
        ssize_t l;

        assert(n > 0);

        while (t < n) {
                l = read(fd, (char *)buf + t, n - t);
                if (l <= 0)
                        return (l);
                t += l;
        }
        return (t);
}
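
/*
 * Read the next record from the file descriptor: first the fixed
 * VSL_OVERHEAD header words, then the payload, growing the record buffer
 * as needed.  SLT__Batch records are skipped.
 */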
static enum vsl_status v_matchproto_(vslc_next_f)
vslc_file_next(const struct VSL_cursor *cursor)
{
        struct vslc_file *c;
        ssize_t i;
        ssize_t l;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
        assert(&c->cursor == cursor);

        do {
                c->cursor.rec.ptr = NULL;
                assert(c->buflen >= 2);
                i = vslc_file_readn(c->fd, c->buf, VSL_BYTES(VSL_OVERHEAD));
                if (i < 0)
                        return (vsl_e_io);
                if (i == 0)
                        return (vsl_e_eof);
                assert(i == VSL_BYTES(VSL_OVERHEAD));
                l = VSL_OVERHEAD + VSL_WORDS(VSL_LEN(c->buf));
                if (c->buflen < l) {
                        while (c->buflen < l)
                                c->buflen = 2 * l;
                        c->buf = realloc(c->buf, VSL_BYTES(c->buflen));
                        AN(c->buf);
                }
                if (l > 2) {
                        i = vslc_file_readn(c->fd, c->buf + VSL_OVERHEAD,
                            VSL_BYTES(l - VSL_OVERHEAD));
                        if (i < 0)
                                return (vsl_e_io);
                        if (i == 0)
                                return (vsl_e_eof);
                        assert(i == VSL_BYTES(l - VSL_OVERHEAD));
                }
                c->cursor.rec.ptr = c->buf;
        } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
        return (vsl_more);
}

static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_file_reset(const struct VSL_cursor *cursor)
{
        (void)cursor;
        /* XXX: Implement me */
        return (vsl_e_eof);
}

static const struct vslc_tbl vslc_file_tbl = {
        .magic          = VSLC_TBL_MAGIC,
        .delete         = vslc_file_delete,
        .next           = vslc_file_next,
        .reset          = vslc_file_reset,
        .check          = NULL,
};
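
/*
 * Cursor over a regular VSL file mapped read-only into memory.  Records
 * are returned straight from the mapping, so no copying or buffer growth
 * is needed.
 */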
struct vslc_mmap {
        unsigned                        magic;
#define VSLC_MMAP_MAGIC                 0x7de15f61
        int                             fd;
        int                             close_fd;
        char                            *b;
        char                            *e;
        struct VSL_cursor               cursor;
        struct VSLC_ptr                 next;
};

static void
vslc_mmap_delete(const struct VSL_cursor *cursor)
{
        struct vslc_mmap *c;

        AN(cursor);
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
        assert(&c->cursor == cursor);
        AZ(munmap(c->b, c->e - c->b));
        if (c->close_fd)
                (void)close(c->fd);
        FREE_OBJ(c);
}

static enum vsl_status v_matchproto_(vslc_next_f)
vslc_mmap_next(const struct VSL_cursor *cursor)
{
        struct vslc_mmap *c;
        const char *t;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
        assert(&c->cursor == cursor);
        c->cursor.rec = c->next;
        t = TRUST_ME(c->cursor.rec.ptr);
        if (t == c->e)
                return (vsl_e_eof);
        c->next.ptr = VSL_NEXT(c->next.ptr);
        t = TRUST_ME(c->next.ptr);
        if (t > c->e)
                return (vsl_e_io);
        return (vsl_more);
}

static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_mmap_reset(const struct VSL_cursor *cursor)
{
        struct vslc_mmap *c;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
        assert(&c->cursor == cursor);
        return (vsl_e_eof);
}

static enum vsl_check v_matchproto_(vslc_check_f)
vslc_mmap_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
{
        struct vslc_mmap *c;
        const char *t;

        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
        assert(&c->cursor == cursor);
        AN(ptr->ptr);
        t = TRUST_ME(ptr->ptr);
        assert(t > c->b);
        assert(t <= c->e);
        return (vsl_check_valid);
}

static const struct vslc_tbl vslc_mmap_tbl = {
        .magic          = VSLC_TBL_MAGIC,
        .delete         = vslc_mmap_delete,
        .next           = vslc_mmap_next,
        .reset          = vslc_mmap_reset,
        .check          = vslc_mmap_check,
};
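
/*
 * Attempt to map the log file.  MAP_FAILED tells VSL_CursorFile() to fall
 * back to the buffered read cursor (the fd is not a regular file or could
 * not be mapped); NULL is a hard error.
 */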
static struct VSL_cursor *
vsl_cursor_mmap(struct VSL_data *vsl, int fd, int close_fd)
{
        struct vslc_mmap *c;
        struct stat st[1];
        void *p;

        AZ(fstat(fd, st));
        if ((st->st_mode & S_IFMT) != S_IFREG)
                return (MAP_FAILED);

        assert(st->st_size >= (off_t)(sizeof VSL_FILE_ID));
        p = mmap(NULL, st->st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                vsl_diag(vsl, "Cannot mmap: %s", strerror(errno));
                return (MAP_FAILED);
        }

        ALLOC_OBJ(c, VSLC_MMAP_MAGIC);
        if (c == NULL) {
                (void)munmap(p, st->st_size);
                if (close_fd)
                        (void)close(fd);
                vsl_diag(vsl, "Out of memory");
                return (NULL);
        }
        c->cursor.priv_tbl = &vslc_mmap_tbl;
        c->cursor.priv_data = c;

        c->fd = fd;
        c->close_fd = close_fd;
        c->b = p;
        c->e = c->b + st->st_size;
        c->next.ptr = TRUST_ME(c->b + sizeof VSL_FILE_ID);

        return (&c->cursor);
}
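
/*
 * Create a cursor for a binary log as written by "varnishlog -w"; the
 * name "-" reads from stdin.  Regular files are mmap()'ed, anything else
 * falls back to the buffered read cursor.  A minimal usage sketch,
 * assuming a struct VSL_data from VSL_New() (error reporting elided):
 *
 *      struct VSL_cursor *c = VSL_CursorFile(vsl, "-", 0);
 *      unsigned n = 0;
 *
 *      while (c != NULL && VSL_Next(c) == vsl_more)
 *              n++;
 *      if (c != NULL)
 *              VSL_DeleteCursor(c);
 *      printf("%u records\n", n);
 */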
struct VSL_cursor *
VSL_CursorFile(struct VSL_data *vsl, const char *name, unsigned options)
{
        struct VSL_cursor *mc;
        struct vslc_file *c;
        int fd;
        int close_fd = 0;
        char buf[sizeof VSL_FILE_ID];
        ssize_t i;

        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
        AN(name);
        (void)options;

        if (!strcmp(name, "-"))
                fd = STDIN_FILENO;
        else {
                fd = open(name, O_RDONLY);
                if (fd < 0) {
                        vsl_diag(vsl, "Cannot open %s: %s", name,
                            strerror(errno));
                        return (NULL);
                }
                close_fd = 1;
        }

        i = vslc_file_readn(fd, buf, sizeof buf);
        if (i <= 0) {
                if (close_fd)
                        (void)close(fd);
                vsl_diag(vsl, "VSL file read error: %s",
                    i < 0 ? strerror(errno) : "EOF");
                return (NULL);
        }
        assert(i == sizeof buf);
        if (memcmp(buf, VSL_FILE_ID, sizeof buf)) {
                if (close_fd)
                        (void)close(fd);
                vsl_diag(vsl, "Not a VSL file: %s", name);
                return (NULL);
        }

        mc = vsl_cursor_mmap(vsl, fd, close_fd);
        if (mc == NULL)
                return (NULL);
        if (mc != MAP_FAILED)
                return (mc);

        ALLOC_OBJ(c, VSLC_FILE_MAGIC);
        if (c == NULL) {
                if (close_fd)
                        (void)close(fd);
                vsl_diag(vsl, "Out of memory");
                return (NULL);
        }
        c->cursor.priv_tbl = &vslc_file_tbl;
        c->cursor.priv_data = c;

        c->fd = fd;
        c->close_fd = close_fd;
        c->buflen = VSL_WORDS(BUFSIZ);
        c->buf = malloc(VSL_BYTES(c->buflen));
        AN(c->buf);

        return (&c->cursor);
}
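
/*
 * Public cursor operations.  Each cursor carries a vslc_tbl vtable in
 * priv_tbl; these wrappers simply dispatch through it.
 */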
void
VSL_DeleteCursor(const struct VSL_cursor *cursor)
{
        const struct vslc_tbl *tbl;

        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
        if (tbl->delete == NULL)
                return;
        (tbl->delete)(cursor);
}

enum vsl_status
VSL_ResetCursor(const struct VSL_cursor *cursor)
{
        const struct vslc_tbl *tbl;

        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
        if (tbl->reset == NULL)
                return (vsl_e_eof);
        return ((tbl->reset)(cursor));
}

enum vsl_status
VSL_Next(const struct VSL_cursor *cursor)
{
        const struct vslc_tbl *tbl;

        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
        AN(tbl->next);
        return ((tbl->next)(cursor));
}

enum vsl_check
VSL_Check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
{
        const struct vslc_tbl *tbl;

        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
        if (tbl->check == NULL)
                return (vsl_check_e_notsupp);
        return ((tbl->check)(cursor, ptr));
}