/* varnish-cache/lib/libvarnishapi/vsl_cursor.c */
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "config.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "vdef.h"
#include "vas.h"
#include "miniobj.h"
#include "vmb.h"

#include "vqueue.h"
#include "vre.h"
#include "vsl_priv.h"

#include "vapi/vsl.h"
#include "vapi/vsm.h"

#include "vsl_api.h"
60
struct vslc_vsm {
61
        unsigned                        magic;
62
#define VSLC_VSM_MAGIC                  0x4D3903A6
63
64
        struct VSL_cursor               cursor;
65
66
        unsigned                        options;
67
68
        struct vsm                      *vsm;
69
        struct vsm_fantom               vf;
70
71
        const struct VSL_head           *head;
72
        const uint32_t                  *end;
73
        struct VSLC_ptr                 next;
74
};
75
76
static void
77 5462
vslc_vsm_delete(const struct VSL_cursor *cursor)
78
{
79
        struct vslc_vsm *c;
80
81 5462
        AN(cursor);
82 5462
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
83 5462
        AZ(VSM_Unmap(c->vsm, &c->vf));
84 5462
        assert(&c->cursor == cursor);
85 5462
        FREE_OBJ(c);
86 5462
}
87
88
/*
89
 * We tolerate the fact that segment_n wraps around eventually: for the default
90
 * vsl_space of 80MB and 8 segments, each segment is 10MB long, so we wrap
91
 * roughly after 40 pebibytes (32bit) or 160 yobibytes (64bit) worth of vsl
92
 * written.
93
 *
94
 * The vsm_check would fail if a vslc paused while this amount of data was
95
 * written
96
 */
97
98
static enum vsl_check v_matchproto_(vslc_check_f)
99 1600266
vslc_vsm_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
100
{
101
        const struct vslc_vsm *c;
102
        unsigned dist;
103
104 1600266
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
105 1600266
        assert(&c->cursor == cursor);
106
107 1600266
        if (ptr->ptr == NULL)
108 0
                return (vsl_check_e_inval);
109
110 1600266
        dist = c->head->segment_n - ptr->priv;
111
112 1600266
        if (dist >= VSL_SEGMENTS - 2)
113
                /* Too close to continue */
114 0
                return (vsl_check_e_inval);
115 1600266
        if (dist >= VSL_SEGMENTS - 4)
116
                /* Warning level */
117 0
                return (vsl_check_warn);
118
        /* Safe */
119 1600266
        return (vsl_check_valid);
120 1600266
}
121
122
static enum vsl_status v_matchproto_(vslc_next_f)
123 1439497
vslc_vsm_next(const struct VSL_cursor *cursor)
124
{
125
        struct vslc_vsm *c;
126
        enum vsl_check i;
127
        uint32_t t;
128
129 1439497
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
130 1439497
        assert(&c->cursor == cursor);
131
132 1439497
        while (1) {
133 1566713
                i = vslc_vsm_check(&c->cursor, &c->next);
134 1566713
                if (i < vsl_check_warn) {
135 0
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
136 0
                                return (vsl_e_abandon);
137
                        else
138 0
                                return (vsl_e_overrun);
139
                }
140
141 1566713
                t = *(volatile const uint32_t *)c->next.ptr;
142 1566713
                AN(t);
143
144 1566713
                if (t == VSL_ENDMARKER) {
145 66656
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
146 0
                                return (vsl_e_abandon);
147 66656
                        if (c->options & VSL_COPT_TAILSTOP)
148 114
                                return (vsl_e_eof);
149
                        /* No new records available */
150 66542
                        return (vsl_end);
151
                }
152
153
                /* New data observed. Ensure load ordering with the log
154
                 * writer. */
155 1500057
                VRMB();
156
157 1500057
                if (t == VSL_WRAPMARKER) {
158
                        /* Wrap around not possible at front */
159 0
                        assert(c->next.ptr != c->head->log);
160 0
                        c->next.ptr = c->head->log;
161 0
                        while (c->next.priv % VSL_SEGMENTS)
162 0
                                c->next.priv++;
163 0
                        continue;
164
                }
165
166 1500057
                c->cursor.rec = c->next;
167 1500057
                c->next.ptr = VSL_NEXT(c->next.ptr);
168
169 1500057
                if (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch) {
170 147944
                        if (!(c->options & VSL_COPT_BATCH))
171
                                /* Skip the batch record */
172 127216
                                continue;
173
                        /* Next call will point to the first record past
174
                           the batch */
175 20728
                        c->next.ptr +=
176 20728
                            VSL_WORDS(VSL_BATCHLEN(c->cursor.rec.ptr));
177 20728
                }
178
179 1372845
                while ((c->next.ptr - c->head->log) / c->head->segsize >
180 1372845
                    c->next.priv % VSL_SEGMENTS)
181 4
                        c->next.priv++;
182
183 1372841
                assert(c->next.ptr >= c->head->log);
184 1372841
                assert(c->next.ptr < c->end);
185
186 1372841
                return (vsl_more);
187
        }
188 1439497
}
189
190
static enum vsl_status v_matchproto_(vslc_reset_f)
191 5462
vslc_vsm_reset(const struct VSL_cursor *cursor)
192
{
193
        struct vslc_vsm *c;
194
        unsigned u, segment_n;
195
        enum vsl_status r;
196
197 5462
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
198 5462
        assert(&c->cursor == cursor);
199 5462
        c->cursor.rec.ptr = NULL;
200
201 5462
        segment_n = c->head->segment_n;
202
        /* Make sure offset table is not stale compared to segment_n */
203 5462
        VRMB();
204
205 5462
        if (c->options & VSL_COPT_TAIL) {
206
                /* Start in the same segment varnishd currently is in and
207
                   run forward until we see the end */
208 3398
                u = c->next.priv = segment_n;
209 3398
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
210 6796
                c->next.ptr = c->head->log +
211 3398
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
212 3398
                do {
213 767869
                        if (c->head->segment_n - u > 1) {
214
                                /* Give up if varnishd is moving faster
215
                                   than us */
216 0
                                return (vsl_e_overrun);
217
                        }
218 767869
                        r = vslc_vsm_next(&c->cursor);
219 767869
                } while (r == vsl_more);
220 3398
                if (r != vsl_end)
221 0
                        return (r);
222 3398
        } else {
223
                /* Starting (VSL_SEGMENTS - 3) behind varnishd. This way
224
                 * even if varnishd advances segment_n immediately, we'll
225
                 * still have a full segment worth of log before the
226
                 * general constraint of at least 2 segments apart will be
227
                 * broken.
228
                 */
229 2064
                c->next.priv = segment_n - (VSL_SEGMENTS - 3);
230 12384
                while (c->head->offset[c->next.priv % VSL_SEGMENTS] < 0) {
231
                        /* seg 0 must be initialized */
232 10320
                        assert(c->next.priv % VSL_SEGMENTS != 0);
233 10320
                        c->next.priv++;
234
                }
235 2064
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
236 4128
                c->next.ptr = c->head->log +
237 2064
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
238
        }
239 5462
        assert(c->next.ptr >= c->head->log);
240 5462
        assert(c->next.ptr < c->end);
241 5462
        return (vsl_end);
242 5462
}
243
244
static const struct vslc_tbl vslc_vsm_tbl = {
245
        .magic          = VSLC_TBL_MAGIC,
246
        .delete         = vslc_vsm_delete,
247
        .next           = vslc_vsm_next,
248
        .reset          = vslc_vsm_reset,
249
        .check          = vslc_vsm_check,
250
};
251
252
struct VSL_cursor *
253 14377
VSL_CursorVSM(struct VSL_data *vsl, struct vsm *vsm, unsigned options)
254
{
255
        struct vslc_vsm *c;
256
        struct vsm_fantom vf;
257
        struct VSL_head *head;
258
        enum vsl_status r;
259
260 14377
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
261
262 14377
        if (!VSM_Get(vsm, &vf, VSL_CLASS, NULL)) {
263 8913
                (void)vsl_diag(vsl,
264
                    "No VSL chunk found (child not started ?)");
265 8913
                return (NULL);
266
        }
267 5464
        if (VSM_Map(vsm, &vf)) {
268 4
                (void)vsl_diag(vsl,
269 2
                    "VSM_Map(): %s", VSM_Error(vsm));
270 2
                return (NULL);
271
        }
272 5462
        AN(vf.b);
273
274 5462
        head = vf.b;
275 5462
        if (memcmp(head->marker, VSL_HEAD_MARKER, sizeof head->marker)) {
276 0
                AZ(VSM_Unmap(vsm, &vf));
277 0
                (void)vsl_diag(vsl, "Not a VSL chunk");
278 0
                return (NULL);
279
        }
280 5462
        ALLOC_OBJ(c, VSLC_VSM_MAGIC);
281 5462
        if (c == NULL) {
282 0
                AZ(VSM_Unmap(vsm, &vf));
283 0
                (void)vsl_diag(vsl, "Out of memory");
284 0
                return (NULL);
285
        }
286 5462
        c->cursor.priv_tbl = &vslc_vsm_tbl;
287 5462
        c->cursor.priv_data = c;
288
289 5462
        c->options = options;
290 5462
        c->vsm = vsm;
291 5462
        c->vf = vf;
292 5462
        c->head = head;
293 5462
        c->end = c->head->log + c->head->segsize * VSL_SEGMENTS;
294 5462
        assert(c->end <= (const uint32_t *)vf.e);
295
296 5462
        r = vslc_vsm_reset(&c->cursor);
297 5462
        if (r != vsl_end) {
298 0
                AZ(VSM_Unmap(vsm, &vf));
299 0
                (void)vsl_diag(vsl, "Cursor initialization failure (%d)", r);
300 0
                FREE_OBJ(c);
301 0
                return (NULL);
302
        }
303
304 5462
        return (&c->cursor);
305 14377
}
306
307
struct vslc_file {
308
        unsigned                        magic;
309
#define VSLC_FILE_MAGIC                 0x1D65FFEF
310
311
        int                             fd;
312
        int                             close_fd;
313
        ssize_t                         buflen;
314
        uint32_t                        *buf;
315
316
        struct VSL_cursor               cursor;
317
318
};
319
320
static void
321 4
vslc_file_delete(const struct VSL_cursor *cursor)
322
{
323
        struct vslc_file *c;
324
325 4
        AN(cursor);
326 4
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
327 4
        assert(&c->cursor == cursor);
328 4
        if (c->close_fd)
329 0
                (void)close(c->fd);
330 4
        if (c->buf != NULL)
331 4
                free(c->buf);
332 4
        FREE_OBJ(c);
333 4
}
/*
 * Read exactly n bytes from fd into buf, looping over short reads.
 *
 * Returns n on success.  If read(2) fails or hits EOF, its raw result
 * (<= 0) is returned immediately, even if some bytes were already
 * transferred in earlier iterations.
 */
static ssize_t
vslc_file_readn(int fd, void *buf, ssize_t n)
{
	ssize_t t = 0;
	ssize_t l;

	assert(n > 0);

	while (t < n) {
		l = read(fd, (char *)buf + t, n - t);
		if (l <= 0)
			return (l);
		t += l;
	}
	return (t);
}
352
353
static enum vsl_status v_matchproto_(vslc_next_f)
354 196
vslc_file_next(const struct VSL_cursor *cursor)
355
{
356
        struct vslc_file *c;
357
        ssize_t i;
358
        ssize_t l;
359
360 196
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
361 196
        assert(&c->cursor == cursor);
362
363 196
        do {
364 196
                c->cursor.rec.ptr = NULL;
365 196
                assert(c->buflen >= 2);
366 196
                i = vslc_file_readn(c->fd, c->buf, VSL_BYTES(VSL_OVERHEAD));
367 196
                if (i < 0)
368 0
                        return (vsl_e_io);
369 196
                if (i == 0)
370 4
                        return (vsl_e_eof);
371 192
                assert(i == VSL_BYTES(VSL_OVERHEAD));
372 192
                l = VSL_OVERHEAD + VSL_WORDS(VSL_LEN(c->buf));
373 192
                if (c->buflen < l) {
374 0
                        while (c->buflen < l)
375 0
                                c->buflen = 2 * l;
376 0
                        c->buf = realloc(c->buf, VSL_BYTES(c->buflen));
377 0
                        AN(c->buf);
378 0
                }
379 192
                if (l > 2) {
380 384
                        i = vslc_file_readn(c->fd, c->buf + VSL_OVERHEAD,
381 192
                            VSL_BYTES(l - VSL_OVERHEAD));
382 192
                        if (i < 0)
383 0
                                return (vsl_e_io);
384 192
                        if (i == 0)
385 0
                                return (vsl_e_eof);
386 192
                        assert(i == VSL_BYTES(l - VSL_OVERHEAD));
387 192
                }
388 192
                c->cursor.rec.ptr = c->buf;
389 192
        } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
390 192
        return (vsl_more);
391 196
}
392
393
static enum vsl_status v_matchproto_(vslc_reset_f)
394 0
vslc_file_reset(const struct VSL_cursor *cursor)
395
{
396 0
        (void)cursor;
397
        /* XXX: Implement me */
398 0
        return (vsl_e_eof);
399
}
400
401
static const struct vslc_tbl vslc_file_tbl = {
402
        .magic          = VSLC_TBL_MAGIC,
403
        .delete         = vslc_file_delete,
404
        .next           = vslc_file_next,
405
        .reset          = vslc_file_reset,
406
        .check          = NULL,
407
};
408
409
struct vslc_mmap {
410
        unsigned                        magic;
411
#define VSLC_MMAP_MAGIC                 0x7de15f61
412
        int                             fd;
413
        int                             close_fd;
414
        char                            *b;
415
        char                            *e;
416
        struct VSL_cursor               cursor;
417
        struct VSLC_ptr                 next;
418
};
419
420
static void
421 6
vslc_mmap_delete(const struct VSL_cursor *cursor)
422
{
423
        struct vslc_mmap *c;
424
425 6
        AN(cursor);
426 6
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
427 6
        assert(&c->cursor == cursor);
428 6
        AZ(munmap(c->b, c->e - c->b));
429 6
        if (c->close_fd)
430 6
                (void)close(c->fd);
431 6
        FREE_OBJ(c);
432 6
}
433
434
static enum vsl_status v_matchproto_(vslc_next_f)
435 72
vslc_mmap_next(const struct VSL_cursor *cursor)
436
{
437
        struct vslc_mmap *c;
438
        const char *t;
439
440 72
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
441 72
        assert(&c->cursor == cursor);
442 72
        c->cursor.rec = c->next;
443 72
        t = TRUST_ME(c->cursor.rec.ptr);
444 72
        if (t == c->e)
445 6
                return (vsl_e_eof);
446 66
        c->next.ptr = VSL_NEXT(c->next.ptr);
447 66
        t = TRUST_ME(c->next.ptr);
448 66
        if (t > c->e)
449 0
                return (vsl_e_io);
450 66
        return (vsl_more);
451 72
}
452
453
static enum vsl_status v_matchproto_(vslc_reset_f)
454 0
vslc_mmap_reset(const struct VSL_cursor *cursor)
455
{
456
        struct vslc_mmap *c;
457
458 0
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
459 0
        assert(&c->cursor == cursor);
460 0
        return (vsl_e_eof);
461
}
462
463
static enum vsl_check v_matchproto_(vslc_check_f)
464 100
vslc_mmap_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
465
{
466
        struct vslc_mmap *c;
467
        const char *t;
468
469 100
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
470 100
        assert(&c->cursor == cursor);
471 100
        AN(ptr->ptr);
472 100
        t = TRUST_ME(ptr->ptr);
473 100
        assert(t > c->b);
474 100
        assert(t <= c->e);
475 100
        return (vsl_check_valid);
476
}
477
478
static const struct vslc_tbl vslc_mmap_tbl = {
479
        .magic          = VSLC_TBL_MAGIC,
480
        .delete         = vslc_mmap_delete,
481
        .next           = vslc_mmap_next,
482
        .reset          = vslc_mmap_reset,
483
        .check          = vslc_mmap_check,
484
};
485
486
static struct VSL_cursor *
487 10
vsl_cursor_mmap(struct VSL_data *vsl, int fd, int close_fd)
488
{
489
        struct vslc_mmap *c;
490
        struct stat st[1];
491
        void *p;
492
493 10
        AZ(fstat(fd, st));
494 10
        if ((st->st_mode & S_IFMT) != S_IFREG)
495 4
                return (MAP_FAILED);
496
497 6
        assert(st->st_size >= (off_t)(sizeof VSL_FILE_ID));
498 6
        p = mmap(NULL, st->st_size, PROT_READ, MAP_PRIVATE, fd, 0);
499 6
        if (p == MAP_FAILED) {
500 0
                vsl_diag(vsl, "Cannot mmap: %s", strerror(errno));
501 0
                return (MAP_FAILED);
502
        }
503
504 6
        ALLOC_OBJ(c, VSLC_MMAP_MAGIC);
505 6
        if (c == NULL) {
506 0
                (void)munmap(p, st->st_size);
507 0
                if (close_fd)
508 0
                        (void)close(fd);
509 0
                vsl_diag(vsl, "Out of memory");
510 0
                return (NULL);
511
        }
512 6
        c->cursor.priv_tbl = &vslc_mmap_tbl;
513 6
        c->cursor.priv_data = c;
514
515 6
        c->fd = fd;
516 6
        c->close_fd = close_fd;
517 6
        c->b = p;
518 6
        c->e = c->b + st->st_size;
519 6
        c->next.ptr = TRUST_ME(c->b + sizeof VSL_FILE_ID);
520
521 6
        return (&c->cursor);
522 10
}
523
524
struct VSL_cursor *
525 14
VSL_CursorFile(struct VSL_data *vsl, const char *name, unsigned options)
526
{
527
        struct VSL_cursor *mc;
528
        struct vslc_file *c;
529
        int fd;
530 14
        int close_fd = 0;
531
        char buf[sizeof VSL_FILE_ID];
532
        ssize_t i;
533
534 14
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
535 14
        AN(name);
536 14
        (void)options;
537
538 14
        if (!strcmp(name, "-"))
539 4
                fd = STDIN_FILENO;
540
        else {
541 10
                fd = open(name, O_RDONLY);
542 10
                if (fd < 0) {
543 4
                        vsl_diag(vsl, "Cannot open %s: %s", name,
544 2
                            strerror(errno));
545 2
                        return (NULL);
546
                }
547 8
                close_fd = 1;
548
        }
549
550 12
        i = vslc_file_readn(fd, buf, sizeof buf);
551 12
        if (i <= 0) {
552 0
                if (close_fd)
553 0
                        (void)close(fd);
554 0
                vsl_diag(vsl, "VSL file read error: %s",
555 0
                    i < 0 ? strerror(errno) : "EOF");
556 0
                return (NULL);
557
        }
558 12
        assert(i == sizeof buf);
559 12
        if (memcmp(buf, VSL_FILE_ID, sizeof buf)) {
560 2
                if (close_fd)
561 2
                        (void)close(fd);
562 2
                vsl_diag(vsl, "Not a VSL file: %s", name);
563 2
                return (NULL);
564
        }
565
566 10
        mc = vsl_cursor_mmap(vsl, fd, close_fd);
567 10
        if (mc == NULL)
568 0
                return (NULL);
569 10
        if (mc != MAP_FAILED)
570 6
                return (mc);
571
572 4
        ALLOC_OBJ(c, VSLC_FILE_MAGIC);
573 4
        if (c == NULL) {
574 0
                if (close_fd)
575 0
                        (void)close(fd);
576 0
                vsl_diag(vsl, "Out of memory");
577 0
                return (NULL);
578
        }
579 4
        c->cursor.priv_tbl = &vslc_file_tbl;
580 4
        c->cursor.priv_data = c;
581
582 4
        c->fd = fd;
583 4
        c->close_fd = close_fd;
584 4
        c->buflen = VSL_WORDS(BUFSIZ);
585 4
        c->buf = malloc(VSL_BYTES(c->buflen));
586 4
        AN(c->buf);
587
588 4
        return (&c->cursor);
589 14
}
590
591
void
592 5472
VSL_DeleteCursor(const struct VSL_cursor *cursor)
593
{
594
        const struct vslc_tbl *tbl;
595
596 5472
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
597 5472
        if (tbl->delete == NULL)
598 0
                return;
599 5472
        (tbl->delete)(cursor);
600 5472
}
601
602
enum vsl_status
603 8830
VSL_ResetCursor(const struct VSL_cursor *cursor)
604
{
605
        const struct vslc_tbl *tbl;
606
607 8830
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
608 8830
        if (tbl->reset == NULL)
609 0
                return (vsl_e_eof);
610 8830
        return ((tbl->reset)(cursor));
611 8830
}
612
613
enum vsl_status
614 793869
VSL_Next(const struct VSL_cursor *cursor)
615
{
616
        const struct vslc_tbl *tbl;
617
618 793869
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
619 793869
        AN(tbl->next);
620 793869
        return ((tbl->next)(cursor));
621
}
622
623
enum vsl_check
624 33806
VSL_Check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
625
{
626
        const struct vslc_tbl *tbl;
627
628 33806
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
629 33806
        if (tbl->check == NULL)
630 176
                return (vsl_check_e_notsupp);
631 33630
        return ((tbl->check)(cursor, ptr));
632 33806
}