/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "config.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "vdef.h"
#include "vas.h"
#include "miniobj.h"
#include "vmb.h"

#include "vqueue.h"
#include "vre.h"
#include "vsl_priv.h"

#include "vapi/vsl.h"
#include "vapi/vsm.h"

#include "vsl_api.h"
struct vslc_vsm {
61
        unsigned                        magic;
62
#define VSLC_VSM_MAGIC                  0x4D3903A6
63
64
        struct VSL_cursor               cursor;
65
66
        unsigned                        options;
67
68
        struct vsm                      *vsm;
69
        struct vsm_fantom               vf;
70
71
        const struct VSL_head           *head;
72
        const uint32_t                  *end;
73
        struct VSLC_ptr                 next;
74
};
75
76
static void
77 106603
vslc_vsm_delete(const struct VSL_cursor *cursor)
78
{
79
        struct vslc_vsm *c;
80
81 106603
        AN(cursor);
82 106603
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
83 106603
        AZ(VSM_Unmap(c->vsm, &c->vf));
84 106603
        assert(&c->cursor == cursor);
85 106603
        FREE_OBJ(c);
86 106603
}
87
88
/*
 * We tolerate the fact that segment_n wraps around eventually: for the default
 * vsl_space of 80MB and 8 segments, each segment is 10MB long, so we wrap
 * roughly after 40 pebibytes (32bit) or 160 yobibytes (64bit) worth of vsl
 * written.
 *
 * The vsm_check would fail if a vslc paused while this amount of data was
 * written
 */
static enum vsl_check v_matchproto_(vslc_check_f)
99 30557084
vslc_vsm_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
100
{
101
        const struct vslc_vsm *c;
102
        unsigned dist;
103
104 30557084
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
105 30557084
        assert(&c->cursor == cursor);
106
107 30557084
        if (ptr->ptr == NULL)
108 0
                return (vsl_check_e_inval);
109
110 30557084
        dist = c->head->segment_n - ptr->priv;
111
112 30557084
        if (dist >= VSL_SEGMENTS - 2)
113
                /* Too close to continue */
114 0
                return (vsl_check_e_inval);
115 30557084
        if (dist >= VSL_SEGMENTS - 4)
116
                /* Warning level */
117 0
                return (vsl_check_warn);
118
        /* Safe */
119 30557084
        return (vsl_check_valid);
120 30557084
}
121
122
static enum vsl_status v_matchproto_(vslc_next_f)
123 27560159
vslc_vsm_next(const struct VSL_cursor *cursor)
124
{
125
        struct vslc_vsm *c;
126
        enum vsl_check i;
127
        uint32_t t;
128
129 27560159
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
130 27560159
        assert(&c->cursor == cursor);
131
132 27560159
        while (1) {
133 29880791
                i = vslc_vsm_check(&c->cursor, &c->next);
134 29880791
                if (i < vsl_check_warn) {
135 0
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
136 0
                                return (vsl_e_abandon);
137
                        else
138 0
                                return (vsl_e_overrun);
139
                }
140
141 29880791
                t = *(volatile const uint32_t *)c->next.ptr;
142 29880791
                AN(t);
143
144 29880791
                if (t == VSL_ENDMARKER) {
145 1300761
                        if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
146 0
                                return (vsl_e_abandon);
147 1300761
                        if (c->options & VSL_COPT_TAILSTOP)
148 2280
                                return (vsl_e_eof);
149
                        /* No new records available */
150 1298481
                        return (vsl_end);
151
                }
152
153
                /* New data observed. Ensure load ordering with the log
154
                 * writer. */
155 28580030
                VRMB();
156
157 28580030
                if (t == VSL_WRAPMARKER) {
158
                        /* Wrap around not possible at front */
159 0
                        assert(c->next.ptr != c->head->log);
160 0
                        c->next.ptr = c->head->log;
161 0
                        while (c->next.priv % VSL_SEGMENTS)
162 0
                                c->next.priv++;
163 0
                        continue;
164
                }
165
166 28580030
                c->cursor.rec = c->next;
167 28580030
                c->next.ptr = VSL_NEXT(c->next.ptr);
168
169 28580030
                if (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch) {
170 2733474
                        if (!(c->options & VSL_COPT_BATCH))
171
                                /* Skip the batch record */
172 2320632
                                continue;
173
                        /* Next call will point to the first record past
174
                           the batch */
175 412842
                        c->next.ptr +=
176 412842
                            VSL_WORDS(VSL_BATCHLEN(c->cursor.rec.ptr));
177 412842
                }
178
179 26259478
                while ((c->next.ptr - c->head->log) / c->head->segsize >
180 26259478
                    c->next.priv % VSL_SEGMENTS)
181 80
                        c->next.priv++;
182
183 26259398
                assert(c->next.ptr >= c->head->log);
184 26259398
                assert(c->next.ptr < c->end);
185
186 26259398
                return (vsl_more);
187
        }
188 27560159
}
189
190
static enum vsl_status v_matchproto_(vslc_reset_f)
191 106603
vslc_vsm_reset(const struct VSL_cursor *cursor)
192
{
193
        struct vslc_vsm *c;
194
        unsigned u, segment_n;
195
        enum vsl_status r;
196
197 106603
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
198 106603
        assert(&c->cursor == cursor);
199 106603
        c->cursor.rec.ptr = NULL;
200
201 106603
        segment_n = c->head->segment_n;
202
        /* Make sure offset table is not stale compared to segment_n */
203 106603
        VRMB();
204
205 106603
        if (c->options & VSL_COPT_TAIL) {
206
                /* Start in the same segment varnishd currently is in and
207
                   run forward until we see the end */
208 65763
                u = c->next.priv = segment_n;
209 65763
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
210 131526
                c->next.ptr = c->head->log +
211 65763
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
212 65763
                do {
213 14444427
                        if (c->head->segment_n - u > 1) {
214
                                /* Give up if varnishd is moving faster
215
                                   than us */
216 0
                                return (vsl_e_overrun);
217
                        }
218 14444427
                        r = vslc_vsm_next(&c->cursor);
219 14444427
                } while (r == vsl_more);
220 65763
                if (r != vsl_end)
221 0
                        return (r);
222 65763
        } else {
223
                /* Starting (VSL_SEGMENTS - 3) behind varnishd. This way
224
                 * even if varnishd advances segment_n immediately, we'll
225
                 * still have a full segment worth of log before the
226
                 * general constraint of at least 2 segments apart will be
227
                 * broken.
228
                 */
229 40840
                c->next.priv = segment_n - (VSL_SEGMENTS - 3);
230 245040
                while (c->head->offset[c->next.priv % VSL_SEGMENTS] < 0) {
231
                        /* seg 0 must be initialized */
232 204200
                        assert(c->next.priv % VSL_SEGMENTS != 0);
233 204200
                        c->next.priv++;
234
                }
235 40840
                assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
236 81680
                c->next.ptr = c->head->log +
237 40840
                    c->head->offset[c->next.priv % VSL_SEGMENTS];
238
        }
239 106603
        assert(c->next.ptr >= c->head->log);
240 106603
        assert(c->next.ptr < c->end);
241 106603
        return (vsl_end);
242 106603
}
243
244
static const struct vslc_tbl vslc_vsm_tbl = {
245
        .magic          = VSLC_TBL_MAGIC,
246
        .delete         = vslc_vsm_delete,
247
        .next           = vslc_vsm_next,
248
        .reset          = vslc_vsm_reset,
249
        .check          = vslc_vsm_check,
250
};
251
252
struct VSL_cursor *
253 270242
VSL_CursorVSM(struct VSL_data *vsl, struct vsm *vsm, unsigned options)
254
{
255
        struct vslc_vsm *c;
256
        struct vsm_fantom vf;
257
        struct VSL_head *head;
258
        enum vsl_status r;
259
260 270242
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
261
262 270242
        if (!VSM_Get(vsm, &vf, VSL_CLASS, NULL)) {
263 163624
                (void)vsl_diag(vsl,
264
                    "No VSL chunk found (child not started ?)");
265 163624
                return (NULL);
266
        }
267 106618
        if (VSM_Map(vsm, &vf)) {
268 30
                (void)vsl_diag(vsl,
269 15
                    "VSM_Map(): %s", VSM_Error(vsm));
270 15
                return (NULL);
271
        }
272 106603
        AN(vf.b);
273
274 106603
        head = vf.b;
275 106603
        if (memcmp(head->marker, VSL_HEAD_MARKER, sizeof head->marker)) {
276 0
                AZ(VSM_Unmap(vsm, &vf));
277 0
                (void)vsl_diag(vsl, "Not a VSL chunk");
278 0
                return (NULL);
279
        }
280 106603
        ALLOC_OBJ(c, VSLC_VSM_MAGIC);
281 106603
        if (c == NULL) {
282 0
                AZ(VSM_Unmap(vsm, &vf));
283 0
                (void)vsl_diag(vsl, "Out of memory");
284 0
                return (NULL);
285
        }
286 106603
        c->cursor.priv_tbl = &vslc_vsm_tbl;
287 106603
        c->cursor.priv_data = c;
288
289 106603
        c->options = options;
290 106603
        c->vsm = vsm;
291 106603
        c->vf = vf;
292 106603
        c->head = head;
293 106603
        c->end = c->head->log + c->head->segsize * VSL_SEGMENTS;
294 106603
        assert(c->end <= (const uint32_t *)vf.e);
295
296 106603
        r = vslc_vsm_reset(&c->cursor);
297 106603
        if (r != vsl_end) {
298 0
                AZ(VSM_Unmap(vsm, &vf));
299 0
                (void)vsl_diag(vsl, "Cursor initialization failure (%d)", r);
300 0
                FREE_OBJ(c);
301 0
                return (NULL);
302
        }
303
304 106603
        return (&c->cursor);
305 270242
}
306
307
struct vslc_file {
308
        unsigned                        magic;
309
#define VSLC_FILE_MAGIC                 0x1D65FFEF
310
311
        int                             fd;
312
        int                             close_fd;
313
        ssize_t                         buflen;
314
        uint32_t                        *buf;
315
316
        struct VSL_cursor               cursor;
317
318
};
319
320
static void
321 80
vslc_file_delete(const struct VSL_cursor *cursor)
322
{
323
        struct vslc_file *c;
324
325 80
        AN(cursor);
326 80
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
327 80
        assert(&c->cursor == cursor);
328 80
        if (c->close_fd)
329 0
                (void)close(c->fd);
330 80
        if (c->buf != NULL)
331 80
                free(c->buf);
332 80
        FREE_OBJ(c);
333 80
}
334
335
/* Read exactly n bytes from fd into buf.  Returns n on success, or the
 * first non-positive read() result (0 on EOF, -1 on error) — note that
 * bytes already consumed before a mid-stream EOF are not reported. */
static ssize_t
vslc_file_readn(int fd, void *buf, ssize_t n)
{
	ssize_t total, nread;

	for (total = 0; total < n; total += nread) {
		nread = read(fd, (char *)buf + total, n - total);
		if (nread <= 0)
			return (nread);
	}
	return (total);
}
static enum vsl_status v_matchproto_(vslc_next_f)
352 3920
vslc_file_next(const struct VSL_cursor *cursor)
353
{
354
        struct vslc_file *c;
355
        ssize_t i;
356
        ssize_t l;
357
358 3920
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
359 3920
        assert(&c->cursor == cursor);
360
361 3920
        do {
362 3920
                c->cursor.rec.ptr = NULL;
363 3920
                assert(c->buflen >= 2);
364 3920
                i = vslc_file_readn(c->fd, c->buf, VSL_BYTES(VSL_OVERHEAD));
365 3920
                if (i < 0)
366 0
                        return (vsl_e_io);
367 3920
                if (i == 0)
368 80
                        return (vsl_e_eof);
369 3840
                assert(i == VSL_BYTES(VSL_OVERHEAD));
370 3840
                l = VSL_OVERHEAD + VSL_WORDS(VSL_LEN(c->buf));
371 3840
                if (c->buflen < l) {
372 0
                        while (c->buflen < l)
373 0
                                c->buflen = 2 * l;
374 0
                        c->buf = realloc(c->buf, VSL_BYTES(c->buflen));
375 0
                        AN(c->buf);
376 0
                }
377 3840
                if (l > 2) {
378 7680
                        i = vslc_file_readn(c->fd, c->buf + VSL_OVERHEAD,
379 3840
                            VSL_BYTES(l - VSL_OVERHEAD));
380 3840
                        if (i < 0)
381 0
                                return (vsl_e_io);
382 3840
                        if (i == 0)
383 0
                                return (vsl_e_eof);
384 3840
                        assert(i == VSL_BYTES(l - VSL_OVERHEAD));
385 3840
                }
386 3840
                c->cursor.rec.ptr = c->buf;
387 3840
        } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
388 3840
        return (vsl_more);
389 3920
}
390
391
static enum vsl_status v_matchproto_(vslc_reset_f)
392 0
vslc_file_reset(const struct VSL_cursor *cursor)
393
{
394 0
        (void)cursor;
395
        /* XXX: Implement me */
396 0
        return (vsl_e_eof);
397
}
398
399
static const struct vslc_tbl vslc_file_tbl = {
400
        .magic          = VSLC_TBL_MAGIC,
401
        .delete         = vslc_file_delete,
402
        .next           = vslc_file_next,
403
        .reset          = vslc_file_reset,
404
        .check          = NULL,
405
};
406
407
struct vslc_mmap {
408
        unsigned                        magic;
409
#define VSLC_MMAP_MAGIC                 0x7de15f61
410
        int                             fd;
411
        int                             close_fd;
412
        char                            *b;
413
        char                            *e;
414
        struct VSL_cursor               cursor;
415
        struct VSLC_ptr                 next;
416
};
417
418
static void
419 120
vslc_mmap_delete(const struct VSL_cursor *cursor)
420
{
421
        struct vslc_mmap *c;
422
423 120
        AN(cursor);
424 120
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
425 120
        assert(&c->cursor == cursor);
426 120
        AZ(munmap(c->b, c->e - c->b));
427 120
        if (c->close_fd)
428 120
                (void)close(c->fd);
429 120
        FREE_OBJ(c);
430 120
}
431
432
static enum vsl_status v_matchproto_(vslc_next_f)
433 1440
vslc_mmap_next(const struct VSL_cursor *cursor)
434
{
435
        struct vslc_mmap *c;
436
        const char *t;
437
438 1440
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
439 1440
        assert(&c->cursor == cursor);
440 1440
        c->cursor.rec = c->next;
441 1440
        t = TRUST_ME(c->cursor.rec.ptr);
442 1440
        if (t == c->e)
443 120
                return (vsl_e_eof);
444 1320
        c->next.ptr = VSL_NEXT(c->next.ptr);
445 1320
        t = TRUST_ME(c->next.ptr);
446 1320
        if (t > c->e)
447 0
                return (vsl_e_io);
448 1320
        return (vsl_more);
449 1440
}
450
451
static enum vsl_status v_matchproto_(vslc_reset_f)
452 0
vslc_mmap_reset(const struct VSL_cursor *cursor)
453
{
454
        struct vslc_mmap *c;
455
456 0
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
457 0
        assert(&c->cursor == cursor);
458 0
        return (vsl_e_eof);
459
}
460
461
static enum vsl_check v_matchproto_(vslc_check_f)
462 2000
vslc_mmap_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
463
{
464
        struct vslc_mmap *c;
465
        const char *t;
466
467 2000
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
468 2000
        assert(&c->cursor == cursor);
469 2000
        AN(ptr->ptr);
470 2000
        t = TRUST_ME(ptr->ptr);
471 2000
        assert(t > c->b);
472 2000
        assert(t <= c->e);
473 2000
        return (vsl_check_valid);
474
}
475
476
static const struct vslc_tbl vslc_mmap_tbl = {
477
        .magic          = VSLC_TBL_MAGIC,
478
        .delete         = vslc_mmap_delete,
479
        .next           = vslc_mmap_next,
480
        .reset          = vslc_mmap_reset,
481
        .check          = vslc_mmap_check,
482
};
483
484
static struct VSL_cursor *
485 200
vsl_cursor_mmap(struct VSL_data *vsl, int fd, int close_fd)
486
{
487
        struct vslc_mmap *c;
488
        struct stat st[1];
489
        void *p;
490
491 200
        AZ(fstat(fd, st));
492 200
        if ((st->st_mode & S_IFMT) != S_IFREG)
493 80
                return (MAP_FAILED);
494
495 120
        assert(st->st_size >= (off_t)(sizeof VSL_FILE_ID));
496 120
        p = mmap(NULL, st->st_size, PROT_READ, MAP_PRIVATE, fd, 0);
497 120
        if (p == MAP_FAILED) {
498 0
                vsl_diag(vsl, "Cannot mmap: %s", strerror(errno));
499 0
                return (MAP_FAILED);
500
        }
501
502 120
        ALLOC_OBJ(c, VSLC_MMAP_MAGIC);
503 120
        if (c == NULL) {
504 0
                (void)munmap(p, st->st_size);
505 0
                if (close_fd)
506 0
                        (void)close(fd);
507 0
                vsl_diag(vsl, "Out of memory");
508 0
                return (NULL);
509
        }
510 120
        c->cursor.priv_tbl = &vslc_mmap_tbl;
511 120
        c->cursor.priv_data = c;
512
513 120
        c->fd = fd;
514 120
        c->close_fd = close_fd;
515 120
        c->b = p;
516 120
        c->e = c->b + st->st_size;
517 120
        c->next.ptr = TRUST_ME(c->b + sizeof VSL_FILE_ID);
518
519 120
        return (&c->cursor);
520 200
}
521
522
struct VSL_cursor *
523 280
VSL_CursorFile(struct VSL_data *vsl, const char *name, unsigned options)
524
{
525
        struct VSL_cursor *mc;
526
        struct vslc_file *c;
527
        int fd;
528 280
        int close_fd = 0;
529
        char buf[sizeof VSL_FILE_ID];
530
        ssize_t i;
531
532 280
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
533 280
        AN(name);
534 280
        (void)options;
535
536 280
        if (!strcmp(name, "-"))
537 80
                fd = STDIN_FILENO;
538
        else {
539 200
                fd = open(name, O_RDONLY);
540 200
                if (fd < 0) {
541 80
                        vsl_diag(vsl, "Cannot open %s: %s", name,
542 40
                            strerror(errno));
543 40
                        return (NULL);
544
                }
545 160
                close_fd = 1;
546
        }
547
548 240
        i = vslc_file_readn(fd, buf, sizeof buf);
549 240
        if (i <= 0) {
550 0
                if (close_fd)
551 0
                        (void)close(fd);
552 0
                vsl_diag(vsl, "VSL file read error: %s",
553 0
                    i < 0 ? strerror(errno) : "EOF");
554 0
                return (NULL);
555
        }
556 240
        assert(i == sizeof buf);
557 240
        if (memcmp(buf, VSL_FILE_ID, sizeof buf)) {
558 40
                if (close_fd)
559 40
                        (void)close(fd);
560 40
                vsl_diag(vsl, "Not a VSL file: %s", name);
561 40
                return (NULL);
562
        }
563
564 200
        mc = vsl_cursor_mmap(vsl, fd, close_fd);
565 200
        if (mc == NULL)
566 0
                return (NULL);
567 200
        if (mc != MAP_FAILED)
568 120
                return (mc);
569
570 80
        ALLOC_OBJ(c, VSLC_FILE_MAGIC);
571 80
        if (c == NULL) {
572 0
                if (close_fd)
573 0
                        (void)close(fd);
574 0
                vsl_diag(vsl, "Out of memory");
575 0
                return (NULL);
576
        }
577 80
        c->cursor.priv_tbl = &vslc_file_tbl;
578 80
        c->cursor.priv_data = c;
579
580 80
        c->fd = fd;
581 80
        c->close_fd = close_fd;
582 80
        c->buflen = VSL_WORDS(BUFSIZ);
583 80
        c->buf = malloc(VSL_BYTES(c->buflen));
584 80
        AN(c->buf);
585
586 80
        return (&c->cursor);
587 280
}
588
589
void
590 106803
VSL_DeleteCursor(const struct VSL_cursor *cursor)
591
{
592
        const struct vslc_tbl *tbl;
593
594 106803
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
595 106803
        if (tbl->delete == NULL)
596 0
                return;
597 106803
        (tbl->delete)(cursor);
598 106803
}
599
600
enum vsl_status
601 176590
VSL_ResetCursor(const struct VSL_cursor *cursor)
602
{
603
        const struct vslc_tbl *tbl;
604
605 176590
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
606 176590
        if (tbl->reset == NULL)
607 0
                return (vsl_e_eof);
608 176590
        return ((tbl->reset)(cursor));
609 176590
}
610
611
enum vsl_status
612 15558956
VSL_Next(const struct VSL_cursor *cursor)
613
{
614
        const struct vslc_tbl *tbl;
615
616 15558956
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
617 15558956
        AN(tbl->next);
618 15558956
        return ((tbl->next)(cursor));
619
}
620
621
enum vsl_check
622 681992
VSL_Check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
623
{
624
        const struct vslc_tbl *tbl;
625
626 681992
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
627 681992
        if (tbl->check == NULL)
628 3520
                return (vsl_check_e_notsupp);
629 678472
        return ((tbl->check)(cursor, ptr));
630 681992
}