varnish-cache/lib/libvarnishapi/vsl_dispatch.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 *
30
 */
31
32
#include "config.h"
33
34
#include <stdarg.h>
35
#include <stdint.h>
36
#include <stdio.h>
37
#include <stdlib.h>
38
#include <string.h>
39
40
#include "vdef.h"
41
#include "vas.h"
42
#include "miniobj.h"
43
44
#include "vqueue.h"
45
#include "vre.h"
46
#include "vtim.h"
47
#include "vtree.h"
48
49
#include "vapi/vsl.h"
50
51
#include "vsl_api.h"
52
53
#define VTX_CACHE 10
54
#define VTX_BUFSIZE_MIN 64
55
#define VTX_SHMCHUNKS 3
56
57
static const char * const vsl_t_names[VSL_t__MAX] = {
58
        [VSL_t_unknown] = "unknown",
59
        [VSL_t_sess]    = "sess",
60
        [VSL_t_req]     = "req",
61
        [VSL_t_bereq]   = "bereq",
62
        [VSL_t_raw]     = "raw",
63
};
64
65
static const char * const vsl_r_names[VSL_r__MAX] = {
66
        [VSL_r_unknown] = "unknown",
67
        [VSL_r_http_1]  = "HTTP/1",
68
        [VSL_r_rxreq]   = "rxreq",
69
        [VSL_r_esi]     = "esi",
70
        [VSL_r_restart] = "restart",
71
        [VSL_r_pass]    = "pass",
72
        [VSL_r_fetch]   = "fetch",
73
        [VSL_r_bgfetch] = "bgfetch",
74
        [VSL_r_pipe]    = "pipe",
75
};
76
77
struct vtx;
78
VTAILQ_HEAD(vtxhead, vtx);
79
80
struct vslc_raw {
81
        unsigned                magic;
82
#define VSLC_RAW_MAGIC          0x247EBD44
83
84
        struct VSL_cursor       cursor;
85
86
        const uint32_t          *ptr;
87
};
88
89
struct synth {
90
        unsigned                magic;
91
#define SYNTH_MAGIC             0xC654479F
92
93
        VTAILQ_ENTRY(synth)     list;
94
        size_t                  offset;
95
        uint32_t                data[VSL_OVERHEAD + VSL_WORDS(64)];
96
};
97
VTAILQ_HEAD(synthhead, synth);
98
99
enum chunk_t {
100
        chunk_t__unassigned,
101
        chunk_t_shm,
102
        chunk_t_buf,
103
};
104
105
struct chunk {
106
        unsigned                                magic;
107
#define CHUNK_MAGIC                             0x48DC0194
108
        enum chunk_t                            type;
109
        union {
110
                struct {
111
                        struct VSLC_ptr         start;
112
                        VTAILQ_ENTRY(chunk)     shmref;
113
                } shm;
114
                struct {
115
                        uint32_t                *data;
116
                        size_t                  space;
117
                } buf;
118
        };
119
        size_t                                  len;
120
        struct vtx                              *vtx;
121
        VTAILQ_ENTRY(chunk)                     list;
122
};
123
VTAILQ_HEAD(chunkhead, chunk);
124
125
struct vslc_vtx {
126
        unsigned                magic;
127
#define VSLC_VTX_MAGIC          0x74C6523F
128
129
        struct VSL_cursor       cursor;
130
131
        struct vtx              *vtx;
132
        struct synth            *synth;
133
        struct chunk            *chunk;
134
        size_t                  chunkstart;
135
        size_t                  offset;
136
};
137
138
struct vtx_key {
139
        uint64_t                vxid;
140
        VRBT_ENTRY(vtx_key)     entry;
141
};
142
VRBT_HEAD(vtx_tree, vtx_key);
143
144
struct vtx {
145
        struct vtx_key          key;
146
        unsigned                magic;
147
#define VTX_MAGIC               0xACC21D09
148
        VTAILQ_ENTRY(vtx)       list_child;
149
        VTAILQ_ENTRY(vtx)       list_vtx;
150
151
        double                  t_start;
152
        unsigned                flags;
153
#define VTX_F_BEGIN             0x1 /* Begin record processed */
154
#define VTX_F_END               0x2 /* End record processed */
155
#define VTX_F_COMPLETE          0x4 /* Marked complete. No new children
156
                                       should be appended */
157
#define VTX_F_READY             0x8 /* This vtx and all it's children are
158
                                       complete */
159
160
        enum VSL_transaction_e  type;
161
        enum VSL_reason_e       reason;
162
163
        struct vtx              *parent;
164
        struct vtxhead          child;
165
        unsigned                n_child;
166
        unsigned                n_childready;
167
        unsigned                n_descend;
168
169
        struct synthhead        synth;
170
171
        struct chunk            shmchunks[VTX_SHMCHUNKS];
172
        struct chunkhead        shmchunks_free;
173
174
        struct chunkhead        chunks;
175
        size_t                  len;
176
177
        struct vslc_vtx         c;
178
};
179
180
struct VSLQ {
181
        unsigned                magic;
182
#define VSLQ_MAGIC              0x23A8BE97
183
184
        struct VSL_data         *vsl;
185
        struct VSL_cursor       *c;
186
        struct vslq_query       *query;
187
188
        enum VSL_grouping_e     grouping;
189
190
        /* Structured mode */
191
        struct vtx_tree         tree;
192
        struct vtxhead          ready;
193
        struct vtxhead          incomplete;
194
        int                     n_outstanding;
195
        struct chunkhead        shmrefs;
196
        struct vtxhead          cache;
197
        unsigned                n_cache;
198
199
        /* Rate limiting */
200
        double                  credits;
201
        vtim_mono               last_use;
202
203
        /* Raw mode */
204
        struct {
205
                struct vslc_raw         c;
206
                struct VSL_transaction  trans;
207
                struct VSL_transaction  *ptrans[2];
208
                struct VSLC_ptr         start;
209
                ssize_t                 len;
210
                ssize_t                 offset;
211
        } raw;
212
};
213
214
static void vtx_synth_rec(struct vtx *vtx, unsigned tag, const char *fmt, ...);
215
/*lint -esym(534, vtx_diag) */
216
static int vtx_diag(struct vtx *vtx, const char *msg);
217
/*lint -esym(534, vtx_diag_tag) */
218
static int vtx_diag_tag(struct vtx *vtx, const uint32_t *ptr,
219
    const char *reason);
220
221
static inline int
222 317641
vtx_keycmp(const struct vtx_key *a, const struct vtx_key *b)
223
{
224 317641
        if (a->vxid < b->vxid)
225 8799
                return (-1);
226 308842
        if (a->vxid > b->vxid)
227 124857
                return (1);
228 183985
        return (0);
229 317641
}
230
231 20746
/* Instantiate the static red-black tree operations for the
   vxid-indexed transaction tree (see vtree.h). */
VRBT_GENERATE_REMOVE_COLOR(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_REMOVE(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT_COLOR(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT_FINISH(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT(vtx_tree, vtx_key, entry, vtx_keycmp, static)
VRBT_GENERATE_FIND(vtx_tree, vtx_key, entry, vtx_keycmp, static)
237
238
static enum vsl_status v_matchproto_(vslc_next_f)
239 542077
vslc_raw_next(const struct VSL_cursor *cursor)
240
{
241
        struct vslc_raw *c;
242
243 542077
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_RAW_MAGIC);
244 542077
        assert(&c->cursor == cursor);
245
246 542077
        AN(c->ptr);
247 542077
        if (c->cursor.rec.ptr == NULL) {
248 272410
                c->cursor.rec.ptr = c->ptr;
249 272410
                return (vsl_more);
250
        } else {
251 269667
                c->cursor.rec.ptr = NULL;
252 269667
                return (vsl_end);
253
        }
254 542077
}
255
256
static enum vsl_status v_matchproto_(vslc_reset_f)
257 62103
vslc_raw_reset(const struct VSL_cursor *cursor)
258
{
259
        struct vslc_raw *c;
260
261 62103
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_RAW_MAGIC);
262 62103
        assert(&c->cursor == cursor);
263
264 62103
        AN(c->ptr);
265 62103
        c->cursor.rec.ptr = NULL;
266
267 62103
        return (vsl_end);
268
}
269
270
static const struct vslc_tbl vslc_raw_tbl = {
271
        .magic  = VSLC_TBL_MAGIC,
272
        .delete = NULL,
273
        .next   = vslc_raw_next,
274
        .reset  = vslc_raw_reset,
275
        .check  = NULL,
276
};
277
278
static enum vsl_status v_matchproto_(vslc_next_f)
279 2132862
vslc_vtx_next(const struct VSL_cursor *cursor)
280
{
281
        struct vslc_vtx *c;
282
        const uint32_t *ptr;
283
        unsigned overrun;
284
285 2132862
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VTX_MAGIC);
286 2132862
        assert(&c->cursor == cursor);
287 2132862
        CHECK_OBJ_NOTNULL(c->vtx, VTX_MAGIC);
288
289 2132862
        do {
290 2294201
                CHECK_OBJ_ORNULL(c->synth, SYNTH_MAGIC);
291 2294201
                if (c->synth != NULL && c->synth->offset == c->offset) {
292
                        /* We're at the offset of the next synth record,
293
                           point to it and advance the pointer */
294 7287
                        c->cursor.rec.ptr = c->synth->data;
295 7287
                        c->synth = VTAILQ_NEXT(c->synth, list);
296 7287
                } else {
297 2286914
                        overrun = c->offset > c->vtx->len;
298 2286914
                        AZ(overrun);
299 2286914
                        if (c->offset == c->vtx->len)
300 207362
                                return (vsl_end);
301
302
                        /* Advance chunk pointer */
303 2079552
                        if (c->chunk == NULL) {
304 61216
                                c->chunk = VTAILQ_FIRST(&c->vtx->chunks);
305 61216
                                c->chunkstart = 0;
306 61216
                        }
307 2079552
                        CHECK_OBJ_NOTNULL(c->chunk, CHUNK_MAGIC);
308 2134969
                        while (c->offset >= c->chunkstart + c->chunk->len) {
309 55417
                                c->chunkstart += c->chunk->len;
310 55417
                                c->chunk = VTAILQ_NEXT(c->chunk, list);
311 55417
                                CHECK_OBJ_NOTNULL(c->chunk, CHUNK_MAGIC);
312
                        }
313
314
                        /* Point to the next stored record */
315 2079552
                        if (c->chunk->type == chunk_t_shm)
316 1709609
                                ptr = c->chunk->shm.start.ptr;
317
                        else {
318 369943
                                assert(c->chunk->type == chunk_t_buf);
319 369943
                                ptr = c->chunk->buf.data;
320
                        }
321 2079552
                        c->cursor.rec.ptr = ptr + c->offset - c->chunkstart;
322 4159104
                        c->offset += VSL_NEXT(c->cursor.rec.ptr) -
323 2079552
                            c->cursor.rec.ptr;
324
                }
325 2086839
        } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
326
327 1925500
        return (vsl_more);
328 2132862
}
329
330
static enum vsl_status v_matchproto_(vslc_reset_f)
331 86116
vslc_vtx_reset(const struct VSL_cursor *cursor)
332
{
333
        struct vslc_vtx *c;
334
335 86116
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VTX_MAGIC);
336 86116
        assert(&c->cursor == cursor);
337 86116
        CHECK_OBJ_NOTNULL(c->vtx, VTX_MAGIC);
338 86116
        c->synth = VTAILQ_FIRST(&c->vtx->synth);
339 86116
        c->chunk = NULL;
340 86116
        c->chunkstart = 0;
341 86116
        c->offset = 0;
342 86116
        c->cursor.rec.ptr = NULL;
343
344 86116
        return (vsl_end);
345
}
346
347
static const struct vslc_tbl vslc_vtx_tbl = {
348
        .magic  = VSLC_TBL_MAGIC,
349
        .delete = NULL,
350
        .next   = vslc_vtx_next,
351
        .reset  = vslc_vtx_reset,
352
        .check  = NULL,
353
};
354
355
/* Create a buf chunk */
356
static struct chunk *
357 10603
chunk_newbuf(struct vtx *vtx, const uint32_t *ptr, size_t len)
358
{
359
        struct chunk *chunk;
360
361 10603
        ALLOC_OBJ(chunk, CHUNK_MAGIC);
362 10601
        XXXAN(chunk);
363 10601
        chunk->type = chunk_t_buf;
364 10601
        chunk->vtx = vtx;
365 10601
        chunk->buf.space = VTX_BUFSIZE_MIN;
366 10601
        while (chunk->buf.space < len)
367 0
                chunk->buf.space *= 2;
368 10601
        chunk->buf.data = malloc(sizeof (uint32_t) * chunk->buf.space);
369 10601
        AN(chunk->buf.data);
370 10601
        memcpy(chunk->buf.data, ptr, sizeof (uint32_t) * len);
371 10601
        chunk->len = len;
372 10601
        return (chunk);
373
}
374
375
/* Free a buf chunk */
376
static void
377 10598
chunk_freebuf(struct chunk **pchunk)
378
{
379
        struct chunk *chunk;
380
381 10598
        TAKE_OBJ_NOTNULL(chunk, pchunk, CHUNK_MAGIC);
382 10598
        assert(chunk->type == chunk_t_buf);
383 10598
        free(chunk->buf.data);
384 10598
        FREE_OBJ(chunk);
385 10598
}
386
387
/* Append a set of records to a chunk */
388
static void
389 145801
chunk_appendbuf(struct chunk *chunk, const uint32_t *ptr, size_t len)
390
{
391
392 145801
        CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
393 145801
        assert(chunk->type == chunk_t_buf);
394 145801
        if (chunk->buf.space < chunk->len + len) {
395 46247
                while (chunk->buf.space < chunk->len + len)
396 23122
                        chunk->buf.space *= 2;
397 46250
                chunk->buf.data = realloc(chunk->buf.data,
398 23125
                    sizeof (uint32_t) * chunk->buf.space);
399 23125
        }
400 145801
        memcpy(chunk->buf.data + chunk->len, ptr, sizeof (uint32_t) * len);
401 145801
        chunk->len += len;
402 145801
}
403
404
/* Transform a shm chunk to a buf chunk */
405
static void
406 0
chunk_shm_to_buf(struct VSLQ *vslq, struct chunk *chunk)
407
{
408
        struct vtx *vtx;
409
        struct chunk *buf;
410
411 0
        CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
412 0
        assert(chunk->type == chunk_t_shm);
413 0
        vtx = chunk->vtx;
414 0
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
415
416 0
        buf = VTAILQ_PREV(chunk, chunkhead, list);
417 0
        if (buf != NULL && buf->type == chunk_t_buf)
418
                /* Previous is a buf chunk, append to it */
419 0
                chunk_appendbuf(buf, chunk->shm.start.ptr, chunk->len);
420
        else {
421
                /* Create a new buf chunk and insert it before this */
422 0
                buf = chunk_newbuf(vtx, chunk->shm.start.ptr, chunk->len);
423 0
                AN(buf);
424 0
                VTAILQ_INSERT_BEFORE(chunk, buf, list);
425
        }
426
427
        /* Reset cursor chunk pointer, vslc_vtx_next will set it correctly */
428 0
        vtx->c.chunk = NULL;
429
430
        /* Remove from the shmref list and vtx, and put chunk back
431
           on the free list */
432 0
        VTAILQ_REMOVE(&vslq->shmrefs, chunk, shm.shmref);
433 0
        VTAILQ_REMOVE(&vtx->chunks, chunk, list);
434 0
        VTAILQ_INSERT_HEAD(&vtx->shmchunks_free, chunk, list);
435 0
}
436
437
/* Append a set of records to a vtx structure */
438
static enum vsl_status
439 209890
vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
440
    size_t len)
441
{
442
        struct chunk *chunk;
443
        enum vsl_check i;
444
445 209890
        AN(vtx);
446 209890
        AN(len);
447 209890
        AN(start);
448
449 209890
        i = VSL_Check(vslq->c, start);
450 209890
        if (i == vsl_check_e_inval)
451 0
                return (vsl_e_overrun);
452
453 209890
        if (i == vsl_check_valid && !VTAILQ_EMPTY(&vtx->shmchunks_free)) {
454
                /* Shmref it */
455 53472
                chunk = VTAILQ_FIRST(&vtx->shmchunks_free);
456 53472
                CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
457 53472
                assert(chunk->type == chunk_t_shm);
458 53472
                assert(chunk->vtx == vtx);
459 53472
                VTAILQ_REMOVE(&vtx->shmchunks_free, chunk, list);
460 53472
                chunk->shm.start = *start;
461 53472
                chunk->len = len;
462 53472
                VTAILQ_INSERT_TAIL(&vtx->chunks, chunk, list);
463
464
                /* Append to shmref list */
465 53472
                VTAILQ_INSERT_TAIL(&vslq->shmrefs, chunk, shm.shmref);
466 53472
        } else {
467
                /* Buffer it */
468 156418
                chunk = VTAILQ_LAST(&vtx->chunks, chunkhead);
469 156418
                CHECK_OBJ_ORNULL(chunk, CHUNK_MAGIC);
470 156418
                if (chunk != NULL && chunk->type == chunk_t_buf) {
471
                        /* Tail is a buf chunk, append to that */
472 145818
                        chunk_appendbuf(chunk, start->ptr, len);
473 145818
                } else {
474
                        /* Append new buf chunk */
475 10600
                        chunk = chunk_newbuf(vtx, start->ptr, len);
476 10600
                        AN(chunk);
477 10600
                        VTAILQ_INSERT_TAIL(&vtx->chunks, chunk, list);
478
                }
479
        }
480 209890
        vtx->len += len;
481 209890
        return (vsl_more);
482 209890
}
483
484
/* Allocate a new vtx structure */
485
static struct vtx *
486 31185
vtx_new(struct VSLQ *vslq)
487
{
488
        struct vtx *vtx;
489
        int i;
490
491 31185
        AN(vslq);
492 31185
        if (vslq->n_cache) {
493 16913
                AZ(VTAILQ_EMPTY(&vslq->cache));
494 16913
                vtx = VTAILQ_FIRST(&vslq->cache);
495 16913
                VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
496 16913
                vslq->n_cache--;
497 16913
        } else {
498 14272
                ALLOC_OBJ(vtx, VTX_MAGIC);
499 14264
                AN(vtx);
500
501 14264
                VTAILQ_INIT(&vtx->child);
502 14264
                VTAILQ_INIT(&vtx->shmchunks_free);
503 57059
                for (i = 0; i < VTX_SHMCHUNKS; i++) {
504 42795
                        vtx->shmchunks[i].magic = CHUNK_MAGIC;
505 42795
                        vtx->shmchunks[i].type = chunk_t_shm;
506 42795
                        vtx->shmchunks[i].vtx = vtx;
507 42795
                        VTAILQ_INSERT_TAIL(&vtx->shmchunks_free,
508
                            &vtx->shmchunks[i], list);
509 42795
                }
510 14264
                VTAILQ_INIT(&vtx->chunks);
511 14264
                VTAILQ_INIT(&vtx->synth);
512 14264
                vtx->c.magic = VSLC_VTX_MAGIC;
513 14264
                vtx->c.vtx = vtx;
514 14264
                vtx->c.cursor.priv_tbl = &vslc_vtx_tbl;
515 14264
                vtx->c.cursor.priv_data = &vtx->c;
516
        }
517
518 31177
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
519 31177
        vtx->key.vxid = 0;
520 31177
        vtx->t_start = VTIM_mono();
521 31177
        vtx->flags = 0;
522 31177
        vtx->type = VSL_t_unknown;
523 31177
        vtx->reason = VSL_r_unknown;
524 31177
        vtx->parent = NULL;
525 31177
        vtx->n_child = 0;
526 31177
        vtx->n_childready = 0;
527 31177
        vtx->n_descend = 0;
528 31177
        vtx->len = 0;
529 31177
        AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);
530
531 31177
        return (vtx);
532
}
533
534
/* Disuse a vtx and all it's children, freeing any resources held. Free or
535
   cache the vtx for later use */
536
static void
537 31170
vtx_retire(struct VSLQ *vslq, struct vtx **pvtx)
538
{
539
        struct vtx *vtx;
540
        struct vtx *child;
541
        struct synth *synth;
542
        struct chunk *chunk;
543
544 31170
        AN(vslq);
545 31170
        TAKE_OBJ_NOTNULL(vtx, pvtx, VTX_MAGIC);
546
547 31170
        AN(vtx->flags & VTX_F_COMPLETE);
548 31170
        AN(vtx->flags & VTX_F_READY);
549 31170
        AZ(vtx->parent);
550
551 34557
        while (!VTAILQ_EMPTY(&vtx->child)) {
552 3387
                child = VTAILQ_FIRST(&vtx->child);
553 3387
                assert(child->parent == vtx);
554 3387
                AN(vtx->n_child);
555 3387
                assert(vtx->n_descend >= child->n_descend + 1);
556 3387
                VTAILQ_REMOVE(&vtx->child, child, list_child);
557 3387
                child->parent = NULL;
558 3387
                vtx->n_child--;
559 3387
                vtx->n_descend -= child->n_descend + 1;
560 3387
                vtx_retire(vslq, &child);
561 3387
                AZ(child);
562
        }
563 31170
        AZ(vtx->n_child);
564 31170
        AZ(vtx->n_descend);
565 31170
        vtx->n_childready = 0;
566
        // remove rval is no way to check if element was present
567 31170
        (void)VRBT_REMOVE(vtx_tree, &vslq->tree, &vtx->key);
568 31170
        vtx->key.vxid = 0;
569 31170
        vtx->flags = 0;
570
571 38357
        while (!VTAILQ_EMPTY(&vtx->synth)) {
572 7187
                synth = VTAILQ_FIRST(&vtx->synth);
573 7187
                CHECK_OBJ_NOTNULL(synth, SYNTH_MAGIC);
574 7187
                VTAILQ_REMOVE(&vtx->synth, synth, list);
575 7187
                FREE_OBJ(synth);
576
        }
577
578 95219
        while (!VTAILQ_EMPTY(&vtx->chunks)) {
579 64049
                chunk = VTAILQ_FIRST(&vtx->chunks);
580 64049
                CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
581 64049
                VTAILQ_REMOVE(&vtx->chunks, chunk, list);
582 64049
                if (chunk->type == chunk_t_shm) {
583 53454
                        VTAILQ_REMOVE(&vslq->shmrefs, chunk, shm.shmref);
584 53454
                        VTAILQ_INSERT_HEAD(&vtx->shmchunks_free, chunk, list);
585 53454
                } else {
586 10595
                        assert(chunk->type == chunk_t_buf);
587 10595
                        chunk_freebuf(&chunk);
588 10595
                        AZ(chunk);
589
                }
590
        }
591 31170
        vtx->len = 0;
592 31170
        AN(vslq->n_outstanding);
593 31170
        vslq->n_outstanding--;
594
595 31170
        if (vslq->n_cache < VTX_CACHE) {
596 30646
                VTAILQ_INSERT_HEAD(&vslq->cache, vtx, list_child);
597 30646
                vslq->n_cache++;
598 30646
        } else
599 524
                FREE_OBJ(vtx);
600
601 31170
}
602
603
/* Lookup a vtx by vxid from the managed list */
604
static struct vtx *
605 230324
vtx_lookup(const struct VSLQ *vslq, uint64_t vxid)
606
{
607
        struct vtx_key lkey, *key;
608
        struct vtx *vtx;
609
610 230324
        AN(vslq);
611 230324
        lkey.vxid = vxid;
612 230324
        key = VRBT_FIND(vtx_tree, &vslq->tree, &lkey);
613 230324
        if (key == NULL)
614 46340
                return (NULL);
615 183984
        CAST_OBJ_NOTNULL(vtx, (void *)key, VTX_MAGIC);
616 183984
        return (vtx);
617 230324
}
618
619
/* Insert a new vtx into the managed list */
620
static struct vtx *
621 31169
vtx_add(struct VSLQ *vslq, uint64_t vxid)
622
{
623
        struct vtx *vtx;
624
625 31169
        AN(vslq);
626 31169
        vtx = vtx_new(vslq);
627 31169
        AN(vtx);
628 31169
        vtx->key.vxid = vxid;
629 31169
        AZ(VRBT_INSERT(vtx_tree, &vslq->tree, &vtx->key));
630 31169
        VTAILQ_INSERT_TAIL(&vslq->incomplete, vtx, list_vtx);
631 31169
        vslq->n_outstanding++;
632 31169
        return (vtx);
633
}
634
635
/* Mark a vtx complete, update child counters and if possible push it or
636
   it's top parent to the ready state */
637
static void
638 31165
vtx_mark_complete(struct VSLQ *vslq, struct vtx *vtx)
639
{
640
641 31165
        AN(vslq);
642 31165
        AN(vtx->flags & VTX_F_END);
643 31165
        AZ(vtx->flags & VTX_F_COMPLETE);
644
645 31165
        if (vtx->type == VSL_t_unknown)
646 0
                vtx_diag(vtx, "vtx of unknown type marked complete");
647
648 31165
        vtx->flags |= VTX_F_COMPLETE;
649 31165
        VTAILQ_REMOVE(&vslq->incomplete, vtx, list_vtx);
650
651 34549
        while (1) {
652 34549
                AZ(vtx->flags & VTX_F_READY);
653 34549
                if (vtx->flags & VTX_F_COMPLETE &&
654 31312
                    vtx->n_child == vtx->n_childready)
655 31165
                        vtx->flags |= VTX_F_READY;
656
                else
657 3384
                        return;
658 31165
                if (vtx->parent == NULL) {
659
                        /* Top level vtx ready */
660 27781
                        VTAILQ_INSERT_TAIL(&vslq->ready, vtx, list_vtx);
661 27781
                        return;
662
                }
663 3384
                vtx = vtx->parent;
664 3384
                vtx->n_childready++;
665 3384
                assert(vtx->n_child >= vtx->n_childready);
666
        }
667 31165
}
668
669
/* Add a child to a parent, and update child counters */
670
static void
671 3379
vtx_set_parent(struct vtx *parent, struct vtx *child)
672
{
673
674 3379
        CHECK_OBJ_NOTNULL(parent, VTX_MAGIC);
675 3379
        CHECK_OBJ_NOTNULL(child, VTX_MAGIC);
676 3379
        assert(parent != child);
677 3379
        AZ(parent->flags & VTX_F_COMPLETE);
678 3379
        AZ(child->flags & VTX_F_COMPLETE);
679 3379
        AZ(child->parent);
680 3379
        child->parent = parent;
681 3379
        VTAILQ_INSERT_TAIL(&parent->child, child, list_child);
682 3379
        parent->n_child++;
683 3379
        do
684 4029
                parent->n_descend += 1 + child->n_descend;
685 4029
        while ((parent = parent->parent) != NULL);
686 3379
}
687
688
/* Parse a begin or link record. Returns the number of elements that was
689
   successfully parsed. */
690
static int
691 83564
vtx_parse_link(const char *str, enum VSL_transaction_e *ptype,
692
    uint64_t *pvxid, enum VSL_reason_e *preason, uint64_t *psub)
693
{
694
        char type[16], reason[16];
695
        uintmax_t vxid, sub;
696
        int i;
697
        enum VSL_transaction_e et;
698
        enum VSL_reason_e er;
699
700 83564
        AN(str);
701 83564
        AN(ptype);
702 83564
        AN(pvxid);
703 83564
        AN(preason);
704
705 83564
        i = sscanf(str, "%15s %ju %15s %ju", type, &vxid, reason, &sub);
706 83564
        if (i < 1)
707 0
                return (0);
708
709
        /* transaction type */
710 257296
        for (et = VSL_t_unknown; et < VSL_t__MAX; et++)
711 257287
                if (!strcmp(type, vsl_t_names[et]))
712 83555
                        break;
713 83564
        if (et >= VSL_t__MAX)
714 0
                et = VSL_t_unknown;
715 83552
        *ptype = et;
716 83552
        if (i == 1)
717 0
                return (1);
718
719
        /* vxid */
720 83552
        assert((vxid & ~VSL_IDENTMASK) == 0);
721 83552
        *pvxid = vxid;
722 83552
        if (i == 2)
723 0
                return (2);
724
725
        /* transaction reason */
726 346880
        for (er = VSL_r_unknown; er < VSL_r__MAX; er++)
727 345078
                if (!strcmp(reason, vsl_r_names[er]))
728 81750
                        break;
729 83552
        if (er >= VSL_r__MAX)
730 1801
                er = VSL_r_unknown;
731 83552
        *preason = er;
732 83552
        if (i == 3)
733 79752
                return (3);
734
735
        /* request sub-level */
736 3800
        if (psub != NULL)
737 1375
                *psub = sub;
738 3800
        return (4);
739 83552
}
740
741
/* Parse and process a begin record */
742
static int
743 31178
vtx_scan_begin(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
744
{
745
        int i;
746
        enum VSL_transaction_e type;
747
        enum VSL_reason_e reason;
748
        uint64_t p_vxid;
749
        struct vtx *p_vtx;
750
751 31178
        assert(VSL_TAG(ptr) == SLT_Begin);
752
753 31178
        AZ(vtx->flags & VTX_F_READY);
754
755 31178
        i = vtx_parse_link(VSL_CDATA(ptr), &type, &p_vxid, &reason, NULL);
756 31178
        if (i < 3)
757 0
                return (vtx_diag_tag(vtx, ptr, "parse error"));
758 31178
        if (type == VSL_t_unknown)
759 0
                (void)vtx_diag_tag(vtx, ptr, "unknown vxid type");
760
761
        /* Check/set vtx type */
762 31178
        if (vtx->type != VSL_t_unknown && vtx->type != type)
763
                /* Type not matching the one previously set by a link
764
                   record */
765 0
                (void)vtx_diag_tag(vtx, ptr, "type mismatch");
766 31178
        vtx->type = type;
767 31178
        vtx->reason = reason;
768
769 31178
        if (p_vxid == 0)
770
                /* Zero means no parent */
771 9030
                return (0);
772 22148
        if (p_vxid == vtx->key.vxid)
773 0
                return (vtx_diag_tag(vtx, ptr, "link to self"));
774
775 22148
        if (vslq->grouping == VSL_g_vxid)
776 18060
                return (0);     /* No links */
777 4088
        if (vslq->grouping == VSL_g_request && vtx->type == VSL_t_req &&
778 1225
            vtx->reason == VSL_r_rxreq)
779 700
                return (0);     /* No links */
780
781 3388
        if (vtx->parent != NULL) {
782 1563
                if (vtx->parent->key.vxid != p_vxid) {
783
                        /* This vtx already belongs to a different
784
                           parent */
785 0
                        return (vtx_diag_tag(vtx, ptr, "link mismatch"));
786
                } else
787
                        /* Link already exists */
788 1563
                        return (0);
789
        }
790
791 1825
        p_vtx = vtx_lookup(vslq, p_vxid);
792 1825
        if (p_vtx == NULL) {
793
                /* Not seen parent yet. Create it. */
794 775
                p_vtx = vtx_add(vslq, p_vxid);
795 775
                AN(p_vtx);
796 775
        } else {
797 1050
                CHECK_OBJ_NOTNULL(p_vtx, VTX_MAGIC);
798 1050
                if (p_vtx->flags & VTX_F_COMPLETE)
799 0
                        return (vtx_diag_tag(vtx, ptr, "link too late"));
800
        }
801
802
        /* Create link */
803 1825
        vtx_set_parent(p_vtx, vtx);
804
805 1825
        return (0);
806 31178
}
807
808
/* Parse and process a link record. Under vxid grouping links are
 * ignored; otherwise the child vxid named by the link is looked up or
 * created and attached below this vtx. Returns 0, or -1 after emitting
 * a diagnostic synth record. */
809
static int
810 23010
vtx_scan_link(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
811
{
812
        int i;
813
        enum VSL_transaction_e c_type;
814
        enum VSL_reason_e c_reason;
815
        uint64_t c_vxid;
816
        struct vtx *c_vtx;
817
818 23010
        assert(VSL_TAG(ptr) == SLT_Link);
819
820 23010
        AZ(vtx->flags & VTX_F_READY);
821
822 23010
        i = vtx_parse_link(VSL_CDATA(ptr), &c_type, &c_vxid, &c_reason, NULL);
823 23010
        if (i < 3)
824 0
                return (vtx_diag_tag(vtx, ptr, "parse error"));
825 23010
        if (c_type == VSL_t_unknown)
826 0
                (void)vtx_diag_tag(vtx, ptr, "unknown vxid type");
827
828 23010
        if (vslq->grouping == VSL_g_vxid)
829 19628
                return (0);     /* No links */
830 3382
        if (vslq->grouping == VSL_g_request && vtx->type == VSL_t_sess)
831 0
                return (0);     /* No links */
832
833 3382
        if (c_vxid == 0)
834 0
                return (vtx_diag_tag(vtx, ptr, "illegal link vxid"));
835 3382
        if (c_vxid == vtx->key.vxid)
836 0
                return (vtx_diag_tag(vtx, ptr, "link to self"));
837
838
        /* Lookup and check child vtx */
839 3382
        c_vtx = vtx_lookup(vslq, c_vxid);
840 3382
        if (c_vtx == NULL) {
841
                /* Child not seen before. Insert it and create link */
842 1557
                c_vtx = vtx_add(vslq, c_vxid);
843 1557
                AN(c_vtx);
844 1557
                AZ(c_vtx->parent);
845 1557
                c_vtx->type = c_type;
846 1557
                c_vtx->reason = c_reason;
847 1557
                vtx_set_parent(vtx, c_vtx);
848 1557
                return (0);
849
        }
850
851 1825
        CHECK_OBJ_NOTNULL(c_vtx, VTX_MAGIC);
852 1825
        if (c_vtx->parent == vtx)
853
                /* Link already exists */
854 1825
                return (0);
855 0
        if (c_vtx->parent != NULL && c_vtx->parent != vtx)
856 0
                return (vtx_diag_tag(vtx, ptr, "duplicate link"));
857 0
        if (c_vtx->flags & VTX_F_COMPLETE)
858 0
                return (vtx_diag_tag(vtx, ptr, "link too late"));
859 0
        if (c_vtx->type != VSL_t_unknown && c_vtx->type != c_type)
860 0
                (void)vtx_diag_tag(vtx, ptr, "type mismatch");
861
        /* Child existed but was not yet linked: adopt it, taking over
           the type/reason stated by the link record */
862 0
        c_vtx->type = c_type;
863 0
        c_vtx->reason = c_reason;
864 0
        vtx_set_parent(vtx, c_vtx);
865 0
        return (0);
866 23010
}
867
868
/* Scan the records of a vtx, performing processing actions on specific
869
   records */
/* Stops once the vtx is marked complete (SLT_End seen) or the cursor
 * runs out of records. */
870
static void
871 216855
vtx_scan(struct VSLQ *vslq, struct vtx *vtx)
872
{
873
        const uint32_t *ptr;
874
        enum VSL_tag_e tag;
875
876 2400212
        while (!(vtx->flags & VTX_F_COMPLETE) &&
877 1184112
            vslc_vtx_next(&vtx->c.cursor) == 1) {
878 999245
                ptr = vtx->c.cursor.rec.ptr;
879 999245
                if (VSL_ID(ptr) != vtx->key.vxid) {
880 0
                        (void)vtx_diag_tag(vtx, ptr, "vxid mismatch");
881 0
                        continue;
882
                }
883
884 999245
                tag = VSL_TAG(ptr);
885 999245
                assert(tag != SLT__Batch);
886
887 999245
                switch (tag) {
888
                case SLT_Begin:
889 31180
                        if (vtx->flags & VTX_F_BEGIN)
890 0
                                (void)vtx_diag_tag(vtx, ptr, "duplicate begin");
891
                        else {
892 31180
                                (void)vtx_scan_begin(vslq, vtx, ptr);
893 31180
                                vtx->flags |= VTX_F_BEGIN;
894
                        }
895 31180
                        break;
896
897
                case SLT_Link:
898 23012
                        (void)vtx_scan_link(vslq, vtx, ptr);
899 23012
                        break;
900
901
                case SLT_End:
                        /* Transaction finished - mark it complete */
902 31166
                        AZ(vtx->flags & VTX_F_END);
903 31166
                        vtx->flags |= VTX_F_END;
904 31166
                        vtx_mark_complete(vslq, vtx);
905 31166
                        break;
906
907
                default:
908 913887
                        break;
909
                }
910
        }
911 216855
}
912
913
/* Force a vtx into complete status by synthing the necessary outstanding
914
   records */
/* Synthesizes a Begin record if none was seen, a diagnostic carrying
 * `reason', and an End record if needed. The closing vtx_scan()
 * consumes the synthetic records and must leave the vtx complete. */
915
static void
916 3593
vtx_force(struct VSLQ *vslq, struct vtx *vtx, const char *reason)
917
{
918
919 3593
        AZ(vtx->flags & VTX_F_COMPLETE);
920 3593
        AZ(vtx->flags & VTX_F_READY);
921 3593
        vtx_scan(vslq, vtx);
922 3593
        if (!(vtx->flags & VTX_F_BEGIN))
923 2
                vtx_synth_rec(vtx, SLT_Begin, "%s %u synth",
924 1
                    vsl_t_names[vtx->type], 0);
925 3593
        vtx_diag(vtx, reason);
926 3593
        if (!(vtx->flags & VTX_F_END))
927 3593
                vtx_synth_rec(vtx, SLT_End, "synth");
928 3593
        vtx_scan(vslq, vtx);
929 3593
        AN(vtx->flags & VTX_F_COMPLETE);
930 3593
}
931
932
/* Token bucket rate limiter: refill credits proportional to the time
 * elapsed since last use (R_opt_p is the refill period, R_opt_l the
 * bucket size / burst limit), then try to consume one credit.
 * Returns 1 if the event is allowed, 0 if it is rate limited. */
static int
933 275
vslq_ratelimit(struct VSLQ *vslq)
934
{
935
        vtim_mono now;
936
        vtim_dur delta;
937
938 275
        CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
939 275
        CHECK_OBJ_NOTNULL(vslq->vsl, VSL_MAGIC);
940
941 275
        now = VTIM_mono();
942 275
        delta = now - vslq->last_use;
943 275
        vslq->credits += (delta / vslq->vsl->R_opt_p) * vslq->vsl->R_opt_l;
944 275
        vslq->credits = vmin_t(double, vslq->credits, vslq->vsl->R_opt_l);
945 275
        vslq->last_use = now;
946
947 275
        if (vslq->credits < 1.0)
948 0
                return (0);
949
950 275
        vslq->credits -= 1.0;
951 275
        return (1);
952 275
}
953
954
/* Build transaction array, do the query and callback. Returns 0 or the
955
   return value from func */
/* The array is built breadth-first from vtx, so parents always precede
 * their children; VLA sizes are bounded by vtx->n_descend + 1. */
956
static int
957 24243
vslq_callback(struct VSLQ *vslq, struct vtx *vtx, VSLQ_dispatch_f *func,
958
    void *priv)
959
{
960 24243
        unsigned n = vtx->n_descend + 1;
961 24243
        struct vtx *vtxs[n];
962 24243
        struct VSL_transaction trans[n];
963 24243
        struct VSL_transaction *ptrans[n + 1];
964
        unsigned i, j;
965
966 24243
        AN(vslq);
967 24243
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
968 24243
        AN(vtx->flags & VTX_F_READY);
969 24243
        AN(func);
970
971 24243
        if (vslq->grouping == VSL_g_session &&
972 548
            vtx->type != VSL_t_sess)
973 0
                return (0);
974 24243
        if (vslq->grouping == VSL_g_request &&
975 700
            vtx->type != VSL_t_req)
976 0
                return (0);
977
978
        /* Build transaction array */
979 24243
        AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);
980 24243
        vtxs[0] = vtx;
981 24243
        trans[0].level = 1;
982 24243
        trans[0].vxid = vtx->key.vxid;
983 24243
        trans[0].vxid_parent = 0;
984 24243
        trans[0].type = vtx->type;
985 24243
        trans[0].reason = vtx->reason;
986 24243
        trans[0].c = &vtx->c.cursor;
987 24243
        i = 1;
988 24243
        j = 0;
        /* Breadth-first walk: append each vtx's children after it */
989 51464
        while (j < i) {
990 30194
                VTAILQ_FOREACH(vtx, &vtxs[j]->child, list_child) {
991 2973
                        assert(i < n);
992 2973
                        AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);
993 2973
                        vtxs[i] = vtx;
994 2973
                        if (vtx->reason == VSL_r_restart)
995
                                /* Restarts stay at the same level as parent */
996 50
                                trans[i].level = trans[j].level;
997
                        else
998 2923
                                trans[i].level = trans[j].level + 1;
999 2973
                        trans[i].vxid = vtx->key.vxid;
1000 2973
                        trans[i].vxid_parent = trans[j].vxid;
1001 2973
                        trans[i].type = vtx->type;
1002 2973
                        trans[i].reason = vtx->reason;
1003 2973
                        trans[i].c = &vtx->c.cursor;
1004 2973
                        i++;
1005 2973
                }
1006 27221
                j++;
1007
        }
1008 24243
        assert(i == n);
1009
1010
        /* Build pointer array */
1011 51457
        for (i = 0; i < n; i++)
1012 27214
                ptrans[i] = &trans[i];
1013 24243
        ptrans[i] = NULL;
1014
1015
        /* Query test goes here */
1016 24243
        if (vslq->query != NULL && !vslq_runquery(vslq->query, ptrans))
1017 8693
                return (0);
1018
1019 15550
        if (vslq->vsl->R_opt_l != 0 && !vslq_ratelimit(vslq))
1020 0
                return (0);
1021
1022
        /* Callback */
1023 15550
        return ((func)(vslq->vsl, ptrans, priv));
1024 24243
}
1025
1026
/* Create a synthetic log record. The record will be inserted at the
1027
   current cursor offset */
/* The synth list is kept sorted on offset; the record's vxid carries
 * the client/backend marker matching the vtx type. */
1028
static void
1029 7187
vtx_synth_rec(struct vtx *vtx, unsigned tag, const char *fmt, ...)
1030
{
1031
        struct synth *synth, *it;
1032
        va_list ap;
1033
        char *buf;
1034
        int l, buflen;
1035
        uint64_t vxid;
1036
1037 7187
        ALLOC_OBJ(synth, SYNTH_MAGIC);
1038 7187
        AN(synth);
1039
1040 7187
        buf = VSL_DATA(synth->data);
1041 7187
        buflen = sizeof(synth->data) - VSL_BYTES(VSL_OVERHEAD);
1042 7187
        va_start(ap, fmt);
1043 7187
        l = vsnprintf(buf, buflen, fmt, ap);
1044 7187
        assert(l >= 0);
1045 7187
        va_end(ap);
1046 7187
        if (l > buflen - 1)
1047 0
                l = buflen - 1;
1048 7187
        buf[l++] = '\0';        /* NUL-terminated */
1049 7187
        vxid = vtx->key.vxid;
1050 7187
        switch (vtx->type) {
1051
        case VSL_t_req:
1052 403
                vxid |= VSL_CLIENTMARKER;
1053 403
                break;
1054
        case VSL_t_bereq:
1055 50
                vxid |= VSL_BACKENDMARKER;
1056 50
                break;
1057
        default:
1058 6734
                break;
1059
        }
        /* Pack the VSL record header: 64-bit vxid in words 1-2, then
           tag, version and payload length in word 0 */
1060 7187
        synth->data[2] = vxid >> 32;
1061 7187
        synth->data[1] = vxid;
1062 14374
        synth->data[0] = (((tag & VSL_IDMASK) << VSL_IDSHIFT) |
1063 7187
            (VSL_VERSION_3 << VSL_VERSHIFT) | l);
1064 7187
        synth->offset = vtx->c.offset;
1065
1066 7187
        VTAILQ_FOREACH_REVERSE(it, &vtx->synth, synthhead, list) {
1067
                /* Make sure the synth list is sorted on offset */
1068 3594
                CHECK_OBJ_NOTNULL(it, SYNTH_MAGIC);
1069 3594
                if (synth->offset >= it->offset)
1070 3594
                        break;
1071 0
        }
1072 7187
        if (it != NULL)
1073 3594
                VTAILQ_INSERT_AFTER(&vtx->synth, it, synth, list);
1074
        else
1075 3593
                VTAILQ_INSERT_HEAD(&vtx->synth, synth, list);
1076
1077
        /* Update cursor */
1078 7187
        CHECK_OBJ_ORNULL(vtx->c.synth, SYNTH_MAGIC);
1079 7187
        if (vtx->c.synth == NULL || vtx->c.synth->offset > synth->offset)
1080 3593
                vtx->c.synth = synth;
1081 7187
}
1082
1083
/* Add a diagnostic SLT_VSL synth record to the vtx. Always returns -1
 * so callers in the scan functions can `return (vtx_diag(...));'. */
1084
static int
1085 3593
vtx_diag(struct vtx *vtx, const char *msg)
1086
{
1087
1088 3593
        /* Pass msg as an argument rather than as the format string, so
           a message containing '%' cannot be misinterpreted by the
           vsnprintf inside vtx_synth_rec (format-string hardening;
           output is unchanged for the literal messages used today). */
        vtx_synth_rec(vtx, SLT_VSL, "%s", msg);
1089 3593
        return (-1);
1090
}
1091
1092
/* Add a SLT_VSL diag synth record to the vtx. Takes an offending record
1093
   that will be included in the log record */
/* Always returns -1 so callers can `return (vtx_diag_tag(...));'. */
1094
static int
1095 0
vtx_diag_tag(struct vtx *vtx, const uint32_t *ptr, const char *reason)
1096
{
1097
1098 0
        vtx_synth_rec(vtx, SLT_VSL, "%s (%ju:%s \"%.*s\")", reason, VSL_ID(ptr),
1099 0
            VSL_tags[VSL_TAG(ptr)], (int)VSL_LEN(ptr), VSL_CDATA(ptr));
1100 0
        return (-1);
1101
}
1102
1103
/* Create a new query context. Takes ownership of the cursor *cp (set
 * to NULL on success); returns NULL on illegal grouping or if the
 * query string fails to compile. */
struct VSLQ *
1104 9350
VSLQ_New(struct VSL_data *vsl, struct VSL_cursor **cp,
1105
    enum VSL_grouping_e grouping, const char *querystring)
1106
{
1107
        struct vslq_query *query;
1108
        struct VSLQ *vslq;
1109
1110 9350
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
1111 9350
        if (grouping >= VSL_g__MAX) {
1112 0
                (void)vsl_diag(vsl, "Illegal query grouping");
1113 0
                return (NULL);
1114
        }
1115 9350
        if (querystring != NULL) {
1116 4250
                query = vslq_newquery(vsl, grouping, querystring);
1117 4250
                if (query == NULL)
1118 800
                        return (NULL);
1119 3450
        } else
1120 5100
                query = NULL;
1121
1122 8550
        ALLOC_OBJ(vslq, VSLQ_MAGIC);
1123 8550
        AN(vslq);
1124 8550
        vslq->vsl = vsl;
1125 8550
        if (cp != NULL) {
1126 6500
                vslq->c = *cp;
1127 6500
                *cp = NULL;
1128 6500
        }
1129 8550
        vslq->grouping = grouping;
1130 8550
        vslq->query = query;
        /* Rate limiting enabled: start with one credit available */
1131 8550
        if (vslq->vsl->R_opt_l != 0) {
1132 50
                vslq->last_use = VTIM_mono();
1133 50
                vslq->credits = 1;
1134 50
        }
1135
1136
        /* Setup normal mode */
1137 8550
        VRBT_INIT(&vslq->tree);
1138 8550
        VTAILQ_INIT(&vslq->ready);
1139 8550
        VTAILQ_INIT(&vslq->incomplete);
1140 8550
        VTAILQ_INIT(&vslq->shmrefs);
1141 8550
        VTAILQ_INIT(&vslq->cache);
1142
1143
        /* Setup raw mode */
1144 8550
        vslq->raw.c.magic = VSLC_RAW_MAGIC;
1145 8550
        vslq->raw.c.cursor.priv_tbl = &vslc_raw_tbl;
1146 8550
        vslq->raw.c.cursor.priv_data = &vslq->raw.c;
1147 8550
        vslq->raw.trans.level = 0;
1148 8550
        vslq->raw.trans.type = VSL_t_raw;
1149 8550
        vslq->raw.trans.reason = VSL_r_unknown;
1150 8550
        vslq->raw.trans.c = &vslq->raw.c.cursor;
1151 8550
        vslq->raw.ptrans[0] = &vslq->raw.trans;
1152 8550
        vslq->raw.ptrans[1] = NULL;
1153
1154 8550
        return (vslq);
1155 9350
}
1156
1157
/* Delete a query context: flush any incomplete transactions, delete
 * the cursor and the compiled query, drain the vtx cache, then free
 * the context itself. *pvslq is set to NULL. */
void
1158 8400
VSLQ_Delete(struct VSLQ **pvslq)
1159
{
1160
        struct VSLQ *vslq;
1161
        struct vtx *vtx;
1162
1163 8400
        TAKE_OBJ_NOTNULL(vslq, pvslq, VSLQ_MAGIC);
1164
1165 8400
        (void)VSLQ_Flush(vslq, NULL, NULL);
1166 8400
        AZ(vslq->n_outstanding);
1167
1168 8400
        if (vslq->c != NULL) {
1169 8400
                VSL_DeleteCursor(vslq->c);
1170 8400
                vslq->c = NULL;
1171 8400
        }
1172
1173 8400
        if (vslq->query != NULL)
1174 3450
                vslq_deletequery(&vslq->query);
1175 8400
        AZ(vslq->query);
1176
        /* Free all cached (retired) vtx structures */
1177 22145
        while (!VTAILQ_EMPTY(&vslq->cache)) {
1178 13745
                AN(vslq->n_cache);
1179 13745
                vtx = VTAILQ_FIRST(&vslq->cache);
1180 13745
                CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
1181 13745
                VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
1182 13745
                vslq->n_cache--;
1183 13745
                FREE_OBJ(vtx);
1184
        }
1185
1186 8400
        FREE_OBJ(vslq);
1187 8400
}
1188
1189
/* Replace the input cursor. Any existing cursor is flushed and
 * deleted first; ownership of *cp is taken (set to NULL). */
void
1190 1900
VSLQ_SetCursor(struct VSLQ *vslq, struct VSL_cursor **cp)
1191
{
1192
1193 1900
        CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
1194
1195 1900
        if (vslq->c != NULL) {
1196 0
                (void)VSLQ_Flush(vslq, NULL, NULL);
1197 0
                AZ(vslq->n_outstanding);
1198 0
                VSL_DeleteCursor(vslq->c);
1199 0
                vslq->c = NULL;
1200 0
        }
1201
1202 1900
        if (cp != NULL) {
1203 1900
                AN(*cp);
1204 1900
                vslq->c = *cp;
1205 1900
                *cp = NULL;
1206 1900
        }
1207 1900
}
1208
1209
/* Regard each log line as a single transaction, feed it through the query
1210
   and do the callback */
/* SLT__Batch container records are stepped into, so each call yields
 * exactly one real record; state between calls is kept in vslq->raw. */
1211
static int
1212 416726
vslq_raw(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
1213
{
1214 416726
        enum vsl_status r = vsl_more;
1215
        int i;
1216
1217 416726
        assert(vslq->grouping == VSL_g_raw);
1218
1219 416726
        assert(vslq->raw.offset <= vslq->raw.len);
1220 416726
        do {
                /* Current batch/record exhausted - fetch the next one */
1221 459086
                if (vslq->raw.offset == vslq->raw.len) {
1222 243823
                        r = VSL_Next(vslq->c);
1223 243823
                        if (r != vsl_more)
1224 127884
                                return (r);
1225 115939
                        AN(vslq->c->rec.ptr);
1226 115939
                        vslq->raw.start = vslq->c->rec;
1227 115939
                        if (VSL_TAG(vslq->c->rec.ptr) == SLT__Batch)
1228 84742
                                vslq->raw.len = VSL_END(vslq->c->rec.ptr,
1229 42371
                                    VSL_BATCHLEN(vslq->c->rec.ptr)) -
1230 42371
                                    vslq->c->rec.ptr;
1231
                        else
1232 147136
                                vslq->raw.len = VSL_NEXT(vslq->raw.start.ptr) -
1233 73568
                                    vslq->raw.start.ptr;
1234 115939
                        assert(vslq->raw.len > 0);
1235 115939
                        vslq->raw.offset = 0;
1236 115939
                }
1237
1238 331202
                vslq->raw.c.ptr = vslq->raw.start.ptr + vslq->raw.offset;
1239 331202
                vslq->raw.c.cursor.rec.ptr = NULL;
1240 331202
                vslq->raw.trans.vxid = VSL_ID(vslq->raw.c.ptr);
1241 331202
                vslq->raw.offset += VSL_NEXT(vslq->raw.c.ptr) - vslq->raw.c.ptr;
1242 331202
        } while (VSL_TAG(vslq->raw.c.ptr) == SLT__Batch);
1243
1244 288842
        assert (r == vsl_more);
1245
1246 288842
        if (func == NULL)
1247 0
                return (r);
1248
1249 288842
        if (vslq->query != NULL &&
1250 40489
            !vslq_runquery(vslq->query, vslq->raw.ptrans))
1251 38039
                return (r);
1252
1253 250803
        if (vslq->vsl->R_opt_l != 0 && !vslq_ratelimit(vslq))
1254 0
                return (r);
1255
        /* Non-zero user return code aborts dispatching */
1256 250803
        i = (func)(vslq->vsl, vslq->raw.ptrans, priv);
1257 250803
        if (i)
1258 2250
                return (i);
1259
1260 248553
        return (r);
1261 416726
}
1262
1263
/* Check the beginning of the shmref list, and buffer refs that are at
1264
 * warning level.
1265
 */
/* Returns vsl_more on success, vsl_e_overrun if a shm chunk was
 * overwritten before it could be buffered. */
1266
static enum vsl_status
1267 289137
vslq_shmref_check(struct VSLQ *vslq)
1268
{
1269
        struct chunk *chunk;
1270
        enum vsl_check i;
1271
1272 289137
        while ((chunk = VTAILQ_FIRST(&vslq->shmrefs)) != NULL) {
1273 214259
                CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
1274 214259
                assert(chunk->type == chunk_t_shm);
1275 214259
                i = VSL_Check(vslq->c, &chunk->shm.start);
1276 214259
                switch (i) {
1277
                case vsl_check_valid:
1278
                        /* First on list is OK, refs behind it must also
1279
                           be OK */
1280 214259
                        return (vsl_more);
1281
                case vsl_check_warn:
1282
                        /* Buffer this chunk */
1283 0
                        chunk_shm_to_buf(vslq, chunk);
1284 0
                        break;
1285
                default:
1286
                        /* Too late to buffer */
1287 0
                        return (vsl_e_overrun);
1288
                }
1289
        }
1290
1291 74878
        return (vsl_more);
1292 289137
}
1293
1294
/* Decide whether the transaction whose SLT_Begin record is at `ptr' is
 * wanted under the current grouping and the -b/-c/-E options.
 * Returns non-zero if the transaction should be tracked. */
static unsigned
1295 34550
vslq_candidate(struct VSLQ *vslq, const uint32_t *ptr)
1296
{
1297
        enum VSL_transaction_e type;
1298
        enum VSL_reason_e reason;
1299
        struct VSL_data *vsl;
1300
        enum VSL_tag_e tag;
1301
        uint64_t p_vxid, sub;
1302
        int i;
1303
1304 34550
        CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
1305 34550
        AN(ptr);
1306
1307 34550
        assert(vslq->grouping != VSL_g_raw);
1308 34550
        if (vslq->grouping == VSL_g_session)
1309 2686
                return (1); /* All are needed */
1310
1311 31864
        vsl = vslq->vsl;
1312 31864
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
        /* vxid grouping: filter on client/backend side per -b/-c */
1313 31864
        if (vslq->grouping == VSL_g_vxid) {
1314 29264
                if (!vsl->c_opt && !vsl->b_opt)
1315 21539
                        AZ(vsl->E_opt);
1316 7725
                else if (!vsl->b_opt && !VSL_CLIENT(ptr))
1317 1775
                        return (0);
1318 5950
                else if (!vsl->c_opt && !VSL_BACKEND(ptr))
1319 725
                        return (0);
1320
                /* Need to parse the Begin tag - fallthrough to below */
1321 26764
        }
1322
1323 29364
        tag = VSL_TAG(ptr);
1324 29364
        assert(tag == SLT_Begin);
1325 29364
        i = vtx_parse_link(VSL_CDATA(ptr), &type, &p_vxid, &reason, &sub);
1326 29364
        if (i < 3 || type == VSL_t_unknown)
1327 2
                return (0);
1328
1329 29362
        if (vslq->grouping == VSL_g_request && type == VSL_t_sess)
1330 500
                return (0);
1331
        /* Sub-transactions (sub > 0) are skipped unless -E was given */
1332 28862
        if (vslq->grouping == VSL_g_vxid && i > 3 && sub > 0 && !vsl->E_opt)
1333 375
                return (0);
1334
1335 28487
        return (1);
1336 34548
}
1337
1338
/* Process next input record */
/* Reads one record (or batch) from the cursor, creates a vtx on a
 * candidate SLT_Begin, and appends + scans the data into its
 * transaction. Records with vxid 0 are skipped. */
1339
static enum vsl_status
1340 531296
vslq_next(struct VSLQ *vslq)
1341
{
1342
        const uint32_t *ptr;
1343
        struct VSL_cursor *c;
1344
        enum vsl_status r;
1345
        enum VSL_tag_e tag;
1346
        ssize_t len;
1347
        uint64_t vxid;
1348
        unsigned keep;
1349
        struct vtx *vtx;
1350
1351 531296
        c = vslq->c;
1352 531296
        r = VSL_Next(c);
1353 531296
        if (r != vsl_more)
1354 241920
                return (r);
1355
1356 289376
        assert (r == vsl_more);
1357
        /* For a batch, take the vxid/length from the container and peek
           at the first contained record's tag */
1358 289376
        tag = (enum VSL_tag_e)VSL_TAG(c->rec.ptr);
1359 289376
        if (tag == SLT__Batch) {
1360 90305
                vxid = VSL_BATCHID(c->rec.ptr);
1361 180610
                len = VSL_END(c->rec.ptr, VSL_BATCHLEN(c->rec.ptr)) -
1362 90305
                    c->rec.ptr;
1363 90305
                if (len == 0)
1364 0
                        return (r);
1365 90305
                ptr = VSL_NEXT(c->rec.ptr);
1366 90305
                tag = (enum VSL_tag_e)VSL_TAG(ptr);
1367 90305
        } else {
1368 199071
                vxid = VSL_ID(c->rec.ptr);
1369 199071
                len = VSL_NEXT(c->rec.ptr) - c->rec.ptr;
1370 199071
                ptr = c->rec.ptr;
1371
        }
1372 289376
        assert(len > 0);
1373 289376
        if (vxid == 0)
1374
                /* Skip non-transactional records */
1375 64309
                return (r);
1376
1377 225067
        vtx = vtx_lookup(vslq, vxid);
1378 225067
        keep = tag != SLT_Begin || vslq_candidate(vslq, ptr);
1379 225067
        if (vtx == NULL && tag == SLT_Begin && keep) {
1380 28838
                vtx = vtx_add(vslq, vxid);
1381 28838
                AN(vtx);
1382 28838
        }
1383 225067
        if (vtx != NULL) {
1384 209932
                AN(keep);
1385 209932
                r = vtx_append(vslq, vtx, &c->rec, len);
1386 209932
                if (r == vsl_more)
1387 209814
                        vtx_scan(vslq, vtx);
1388 209932
        }
1389
1390 224885
        return (r);
1391 531114
}
1392
1393
/* Test query and report any ready transactions */
/* Each ready vtx is handed to vslq_callback (when func != NULL) and
 * then retired. Stops early and returns the first non-zero user
 * return code; returns 0 otherwise. */
1394
static int
1395 32645
vslq_process_ready(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
1396
{
1397
        struct vtx *vtx;
1398 32645
        int i = 0;
1399
1400 32645
        AN(vslq);
1401
1402 56151
        while (!VTAILQ_EMPTY(&vslq->ready)) {
1403 27781
                vtx = VTAILQ_FIRST(&vslq->ready);
1404 27781
                CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
1405 27781
                VTAILQ_REMOVE(&vslq->ready, vtx, list_vtx);
1406 27781
                AN(vtx->flags & VTX_F_READY);
1407 27781
                if (func != NULL)
1408 24244
                        i = vslq_callback(vslq, vtx, func, priv);
1409 27781
                vtx_retire(vslq, &vtx);
1410 27781
                AZ(vtx);
1411 27781
                if (i)
1412 4275
                        return (i);
1413
        }
1414
1415 28370
        return (0);
1416 32645
}
1417
1418
/* Process the input cursor, calling the callback function on matching
1419
   transaction sets */
/* Returns a vsl_status, or the user's non-zero return code from the
 * callback. In raw grouping the work is delegated to vslq_raw(). */
1420
int
1421 947900
VSLQ_Dispatch(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
1422
{
1423
        enum vsl_status r;
1424
        int i;
1425
        double now;
1426
        struct vtx *vtx;
1427
1428 947900
        CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
1429
1430
        /* Check that we have a cursor */
1431 947900
        if (vslq->c == NULL)
1432 0
                return (vsl_e_abandon);
1433
1434 947900
        if (vslq->grouping == VSL_g_raw)
1435 416726
                return (vslq_raw(vslq, func, priv));
1436
1437
        /* Process next cursor input */
1438 531174
        r = vslq_next(vslq);
1439 531174
        if (r != vsl_more)
1440
                /* At end of log or cursor reports error condition */
1441 241722
                return (r);
1442
1443
        /* Check shmref list and buffer if necessary */
1444 289452
        r = vslq_shmref_check(vslq);
1445 289452
        if (r != vsl_more)
1446
                /* Buffering of shm ref failed */
1447 0
                return (r);
1448
1449 289452
        assert (r == vsl_more);
1450
1451
        /* Check vtx timeout */
        /* The incomplete list is ordered on t_start, so stop at the
           first vtx that is still within the -T timeout */
1452 289452
        now = VTIM_mono();
1453 289502
        while (!VTAILQ_EMPTY(&vslq->incomplete)) {
1454 210187
                vtx = VTAILQ_FIRST(&vslq->incomplete);
1455 210187
                CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
1456 210187
                if (now - vtx->t_start < vslq->vsl->T_opt)
1457 210137
                        break;
1458 50
                vtx_force(vslq, vtx, "timeout");
1459 50
                AN(vtx->flags & VTX_F_COMPLETE);
1460
        }
1461
1462
        /* Check store limit */
1463 289452
        while (vslq->n_outstanding > vslq->vsl->L_opt &&
1464 0
            !(VTAILQ_EMPTY(&vslq->incomplete))) {
1465 0
                vtx = VTAILQ_FIRST(&vslq->incomplete);
1466 0
                CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
1467 0
                vtx_force(vslq, vtx, "store overflow");
1468 0
                AN(vtx->flags & VTX_F_COMPLETE);
1469 0
                i = vslq_process_ready(vslq, func, priv);
1470 0
                if (i)
1471
                        /* User return code */
1472 0
                        return (i);
1473
        }
1474
1475
        /* Check ready list */
1476 289452
        if (!VTAILQ_EMPTY(&vslq->ready)) {
1477 24246
                i = vslq_process_ready(vslq, func, priv);
1478 24246
                if (i)
1479
                        /* User return code */
1480 4275
                        return (i);
1481 19971
        }
1482
1483 285177
        return (vsl_more);
1484 947900
}
1485
1486
/* Flush any incomplete vtx held on to. Do callbacks if func != NULL */
/* Incomplete transactions are force-completed with a "flush"
 * diagnostic before the ready list is processed. Returns 0 or the
 * user's return code from the callback. */
1487
int
1488 8400
VSLQ_Flush(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
1489
{
1490
        struct vtx *vtx;
1491
1492 8400
        CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
1493
1494 11943
        while (!VTAILQ_EMPTY(&vslq->incomplete)) {
1495 3543
                vtx = VTAILQ_FIRST(&vslq->incomplete);
1496 3543
                CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
1497 3543
                AZ(vtx->flags & VTX_F_COMPLETE);
1498 3543
                vtx_force(vslq, vtx, "flush");
1499
        }
1500
1501 8400
        return (vslq_process_ready(vslq, func, priv));
1502
}