/* varnish-cache/lib/libvarnishapi/vsl_dispatch.c */

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "vdef.h"
#include "vas.h"
#include "miniobj.h"

#include "vqueue.h"
#include "vre.h"
#include "vtim.h"
#include "vtree.h"

#include "vapi/vsl.h"

#include "vsl_api.h"

#define VTX_CACHE 10
#define VTX_BUFSIZE_MIN 64
#define VTX_SHMCHUNKS 3
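
/*
 * Tuning knobs for the dispatcher: VTX_CACHE bounds how many retired vtx
 * structures are kept around for reuse, VTX_BUFSIZE_MIN is the initial
 * size (in 32bit words) of a heap buffer chunk, and VTX_SHMCHUNKS is the
 * number of preallocated shm-reference chunks per vtx before appends fall
 * back to copying records into heap buffers.
 */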

static const char * const vsl_t_names[VSL_t__MAX] = {
    [VSL_t_unknown] = "unknown",
    [VSL_t_sess]    = "sess",
    [VSL_t_req]     = "req",
    [VSL_t_bereq]   = "bereq",
    [VSL_t_raw]     = "raw",
};

static const char * const vsl_r_names[VSL_r__MAX] = {
    [VSL_r_unknown] = "unknown",
    [VSL_r_http_1]  = "HTTP/1",
    [VSL_r_rxreq]   = "rxreq",
    [VSL_r_esi]     = "esi",
    [VSL_r_restart] = "restart",
    [VSL_r_pass]    = "pass",
    [VSL_r_fetch]   = "fetch",
    [VSL_r_bgfetch] = "bgfetch",
    [VSL_r_pipe]    = "pipe",
};
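
/*
 * Data model overview: each in-flight transaction (vxid) is tracked by a
 * struct vtx, and its log records are stored as a list of chunks that
 * either reference the shared memory segment directly (chunk_t_shm) or
 * hold a private copy (chunk_t_buf). Synthetic records (struct synth) can
 * be spliced in at a given offset, and struct vslc_vtx is the cursor that
 * replays chunks and synth records in order.
 */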

struct vtx;
VTAILQ_HEAD(vtxhead, vtx);

struct vslc_raw {
    unsigned magic;
#define VSLC_RAW_MAGIC 0x247EBD44

    struct VSL_cursor cursor;

    const uint32_t *ptr;
};

struct synth {
    unsigned magic;
#define SYNTH_MAGIC 0xC654479F

    VTAILQ_ENTRY(synth) list;
    size_t offset;
    uint32_t data[VSL_OVERHEAD + VSL_WORDS(64)];
};
VTAILQ_HEAD(synthhead, synth);

enum chunk_t {
    chunk_t__unassigned,
    chunk_t_shm,
    chunk_t_buf,
};

struct chunk {
    unsigned magic;
#define CHUNK_MAGIC 0x48DC0194
    enum chunk_t type;
    union {
        struct {
            struct VSLC_ptr start;
            VTAILQ_ENTRY(chunk) shmref;
        } shm;
        struct {
            uint32_t *data;
            size_t space;
        } buf;
    };
    size_t len;
    struct vtx *vtx;
    VTAILQ_ENTRY(chunk) list;
};
VTAILQ_HEAD(chunkhead, chunk);

struct vslc_vtx {
    unsigned magic;
#define VSLC_VTX_MAGIC 0x74C6523F

    struct VSL_cursor cursor;

    struct vtx *vtx;
    struct synth *synth;
    struct chunk *chunk;
    size_t chunkstart;
    size_t offset;
};

struct vtx_key {
    uint64_t vxid;
    VRBT_ENTRY(vtx_key) entry;
};
VRBT_HEAD(vtx_tree, vtx_key);

struct vtx {
    struct vtx_key key;
    unsigned magic;
#define VTX_MAGIC 0xACC21D09
    VTAILQ_ENTRY(vtx) list_child;
    VTAILQ_ENTRY(vtx) list_vtx;

    double t_start;
    unsigned flags;
#define VTX_F_BEGIN 0x1    /* Begin record processed */
#define VTX_F_END 0x2      /* End record processed */
#define VTX_F_COMPLETE 0x4 /* Marked complete. No new children
                              should be appended */
#define VTX_F_READY 0x8    /* This vtx and all its children are
                              complete */

    enum VSL_transaction_e type;
    enum VSL_reason_e reason;

    struct vtx *parent;
    struct vtxhead child;
    unsigned n_child;
    unsigned n_childready;
    unsigned n_descend;

    struct synthhead synth;

    struct chunk shmchunks[VTX_SHMCHUNKS];
    struct chunkhead shmchunks_free;

    struct chunkhead chunks;
    size_t len;

    struct vslc_vtx c;
};

struct VSLQ {
    unsigned magic;
#define VSLQ_MAGIC 0x23A8BE97

    struct VSL_data *vsl;
    struct VSL_cursor *c;
    struct vslq_query *query;

    enum VSL_grouping_e grouping;

    /* Structured mode */
    struct vtx_tree tree;
    struct vtxhead ready;
    struct vtxhead incomplete;
    int n_outstanding;
    struct chunkhead shmrefs;
    struct vtxhead cache;
    unsigned n_cache;

    /* Rate limiting */
    double credits;
    vtim_mono last_use;

    /* Raw mode */
    struct {
        struct vslc_raw c;
        struct VSL_transaction trans;
        struct VSL_transaction *ptrans[2];
        struct VSLC_ptr start;
        ssize_t len;
        ssize_t offset;
    } raw;
};
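
/*
 * The VSLQ keeps the active vtx structures in a red-black tree keyed on
 * vxid, plus lists of incomplete and ready transactions, the outstanding
 * shm references and a small cache of retired vtx structures for reuse.
 * Raw grouping bypasses all of that and uses the embedded raw cursor.
 */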

static void vtx_synth_rec(struct vtx *vtx, unsigned tag, const char *fmt, ...);
/*lint -esym(534, vtx_diag) */
static int vtx_diag(struct vtx *vtx, const char *msg);
/*lint -esym(534, vtx_diag_tag) */
static int vtx_diag_tag(struct vtx *vtx, const uint32_t *ptr,
    const char *reason);

static inline int
vtx_keycmp(const struct vtx_key *a, const struct vtx_key *b)
{
    if (a->vxid < b->vxid)
        return (-1);
    if (a->vxid > b->vxid)
        return (1);
    return (0);
}

VRBT_GENERATE_REMOVE_COLOR(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_REMOVE(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT_COLOR(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT_FINISH(vtx_tree, vtx_key, entry, static)
VRBT_GENERATE_INSERT(vtx_tree, vtx_key, entry, vtx_keycmp, static)
VRBT_GENERATE_FIND(vtx_tree, vtx_key, entry, vtx_keycmp, static)
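
/* The tree operations generated above order vtx_key entries by vxid. */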

static enum vsl_status v_matchproto_(vslc_next_f)
vslc_raw_next(const struct VSL_cursor *cursor)
{
    struct vslc_raw *c;

    CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_RAW_MAGIC);
    assert(&c->cursor == cursor);

    AN(c->ptr);
    if (c->cursor.rec.ptr == NULL) {
        c->cursor.rec.ptr = c->ptr;
        return (vsl_more);
    } else {
        c->cursor.rec.ptr = NULL;
        return (vsl_end);
    }
}

static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_raw_reset(const struct VSL_cursor *cursor)
{
    struct vslc_raw *c;

    CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_RAW_MAGIC);
    assert(&c->cursor == cursor);

    AN(c->ptr);
    c->cursor.rec.ptr = NULL;

    return (vsl_end);
}

static const struct vslc_tbl vslc_raw_tbl = {
    .magic = VSLC_TBL_MAGIC,
    .delete = NULL,
    .next = vslc_raw_next,
    .reset = vslc_raw_reset,
    .check = NULL,
};
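
/*
 * Cursor method table for the raw cursor: a vslc_raw yields exactly one
 * record (c->ptr) per reset, so next returns vsl_more once and vsl_end on
 * the following call.
 */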

static enum vsl_status v_matchproto_(vslc_next_f)
vslc_vtx_next(const struct VSL_cursor *cursor)
{
    struct vslc_vtx *c;
    const uint32_t *ptr;
    unsigned overrun;

    CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VTX_MAGIC);
    assert(&c->cursor == cursor);
    CHECK_OBJ_NOTNULL(c->vtx, VTX_MAGIC);

    do {
        CHECK_OBJ_ORNULL(c->synth, SYNTH_MAGIC);
        if (c->synth != NULL && c->synth->offset == c->offset) {
            /* We're at the offset of the next synth record,
               point to it and advance the pointer */
            c->cursor.rec.ptr = c->synth->data;
            c->synth = VTAILQ_NEXT(c->synth, list);
        } else {
            overrun = c->offset > c->vtx->len;
            AZ(overrun);
            if (c->offset == c->vtx->len)
                return (vsl_end);

            /* Advance chunk pointer */
            if (c->chunk == NULL) {
                c->chunk = VTAILQ_FIRST(&c->vtx->chunks);
                c->chunkstart = 0;
            }
            CHECK_OBJ_NOTNULL(c->chunk, CHUNK_MAGIC);
            while (c->offset >= c->chunkstart + c->chunk->len) {
                c->chunkstart += c->chunk->len;
                c->chunk = VTAILQ_NEXT(c->chunk, list);
                CHECK_OBJ_NOTNULL(c->chunk, CHUNK_MAGIC);
            }

            /* Point to the next stored record */
            if (c->chunk->type == chunk_t_shm)
                ptr = c->chunk->shm.start.ptr;
            else {
                assert(c->chunk->type == chunk_t_buf);
                ptr = c->chunk->buf.data;
            }
            c->cursor.rec.ptr = ptr + c->offset - c->chunkstart;
            c->offset += VSL_NEXT(c->cursor.rec.ptr) -
                c->cursor.rec.ptr;
        }
    } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);

    return (vsl_more);
}

static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_vtx_reset(const struct VSL_cursor *cursor)
{
    struct vslc_vtx *c;

    CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VTX_MAGIC);
    assert(&c->cursor == cursor);
    CHECK_OBJ_NOTNULL(c->vtx, VTX_MAGIC);
    c->synth = VTAILQ_FIRST(&c->vtx->synth);
    c->chunk = NULL;
    c->chunkstart = 0;
    c->offset = 0;
    c->cursor.rec.ptr = NULL;

    return (vsl_end);
}

static const struct vslc_tbl vslc_vtx_tbl = {
    .magic = VSLC_TBL_MAGIC,
    .delete = NULL,
    .next = vslc_vtx_next,
    .reset = vslc_vtx_reset,
    .check = NULL,
};
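
/*
 * The vtx cursor above walks the chunk list by word offset into the
 * transaction, transparently skips SLT__Batch headers, and yields any
 * synth record whose offset matches the current position before moving
 * on to the next stored record.
 */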

/* Create a buf chunk */
static struct chunk *
chunk_newbuf(struct vtx *vtx, const uint32_t *ptr, size_t len)
{
    struct chunk *chunk;

    ALLOC_OBJ(chunk, CHUNK_MAGIC);
    XXXAN(chunk);
    chunk->type = chunk_t_buf;
    chunk->vtx = vtx;
    chunk->buf.space = VTX_BUFSIZE_MIN;
    while (chunk->buf.space < len)
        chunk->buf.space *= 2;
    chunk->buf.data = malloc(sizeof (uint32_t) * chunk->buf.space);
    AN(chunk->buf.data);
    memcpy(chunk->buf.data, ptr, sizeof (uint32_t) * len);
    chunk->len = len;
    return (chunk);
}

/* Free a buf chunk */
static void
chunk_freebuf(struct chunk **pchunk)
{
    struct chunk *chunk;

    TAKE_OBJ_NOTNULL(chunk, pchunk, CHUNK_MAGIC);
    assert(chunk->type == chunk_t_buf);
    free(chunk->buf.data);
    FREE_OBJ(chunk);
}

/* Append a set of records to a chunk */
static void
chunk_appendbuf(struct chunk *chunk, const uint32_t *ptr, size_t len)
{

    CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
    assert(chunk->type == chunk_t_buf);
    if (chunk->buf.space < chunk->len + len) {
        while (chunk->buf.space < chunk->len + len)
            chunk->buf.space *= 2;
        chunk->buf.data = realloc(chunk->buf.data,
            sizeof (uint32_t) * chunk->buf.space);
    }
    memcpy(chunk->buf.data + chunk->len, ptr, sizeof (uint32_t) * len);
    chunk->len += len;
}

/* Transform a shm chunk to a buf chunk */
static void
chunk_shm_to_buf(struct VSLQ *vslq, struct chunk *chunk)
{
    struct vtx *vtx;
    struct chunk *buf;

    CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
    assert(chunk->type == chunk_t_shm);
    vtx = chunk->vtx;
    CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);

    buf = VTAILQ_PREV(chunk, chunkhead, list);
    if (buf != NULL && buf->type == chunk_t_buf)
        /* Previous is a buf chunk, append to it */
        chunk_appendbuf(buf, chunk->shm.start.ptr, chunk->len);
    else {
        /* Create a new buf chunk and insert it before this */
        buf = chunk_newbuf(vtx, chunk->shm.start.ptr, chunk->len);
        AN(buf);
        VTAILQ_INSERT_BEFORE(chunk, buf, list);
    }

    /* Reset cursor chunk pointer, vslc_vtx_next will set it correctly */
    vtx->c.chunk = NULL;

    /* Remove from the shmref list and vtx, and put chunk back
       on the free list */
    VTAILQ_REMOVE(&vslq->shmrefs, chunk, shm.shmref);
    VTAILQ_REMOVE(&vtx->chunks, chunk, list);
    VTAILQ_INSERT_HEAD(&vtx->shmchunks_free, chunk, list);
}
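
/*
 * Migrating a chunk from shm to buf is how the dispatcher keeps a
 * transaction's records valid once the log writer gets close to
 * overwriting them: the referenced words are copied to the heap and the
 * shm chunk is returned to the per-vtx free list.
 */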

/* Append a set of records to a vtx structure */
static enum vsl_status
vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
    size_t len)
{
    struct chunk *chunk;
    enum vsl_check i;

    AN(vtx);
    AN(len);
    AN(start);

    i = VSL_Check(vslq->c, start);
    if (i == vsl_check_e_inval)
        return (vsl_e_overrun);

    if (i == vsl_check_valid && !VTAILQ_EMPTY(&vtx->shmchunks_free)) {
        /* Shmref it */
        chunk = VTAILQ_FIRST(&vtx->shmchunks_free);
        CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
        assert(chunk->type == chunk_t_shm);
        assert(chunk->vtx == vtx);
        VTAILQ_REMOVE(&vtx->shmchunks_free, chunk, list);
        chunk->shm.start = *start;
        chunk->len = len;
        VTAILQ_INSERT_TAIL(&vtx->chunks, chunk, list);

        /* Append to shmref list */
        VTAILQ_INSERT_TAIL(&vslq->shmrefs, chunk, shm.shmref);
    } else {
        /* Buffer it */
        chunk = VTAILQ_LAST(&vtx->chunks, chunkhead);
        CHECK_OBJ_ORNULL(chunk, CHUNK_MAGIC);
        if (chunk != NULL && chunk->type == chunk_t_buf) {
            /* Tail is a buf chunk, append to that */
            chunk_appendbuf(chunk, start->ptr, len);
        } else {
            /* Append new buf chunk */
            chunk = chunk_newbuf(vtx, start->ptr, len);
            AN(chunk);
            VTAILQ_INSERT_TAIL(&vtx->chunks, chunk, list);
        }
    }
    vtx->len += len;
    return (vsl_more);
}
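
/*
 * Appends prefer zero-copy shm references while VSL_Check still reports
 * the record as valid and a preallocated shm chunk is free; otherwise the
 * records are copied into (or appended to) a heap buffer chunk.
 */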

/* Allocate a new vtx structure */
static struct vtx *
vtx_new(struct VSLQ *vslq)
{
    struct vtx *vtx;
    int i;

    AN(vslq);
    if (vslq->n_cache) {
        AZ(VTAILQ_EMPTY(&vslq->cache));
        vtx = VTAILQ_FIRST(&vslq->cache);
        VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
        vslq->n_cache--;
    } else {
        ALLOC_OBJ(vtx, VTX_MAGIC);
        AN(vtx);

        VTAILQ_INIT(&vtx->child);
        VTAILQ_INIT(&vtx->shmchunks_free);
        for (i = 0; i < VTX_SHMCHUNKS; i++) {
            vtx->shmchunks[i].magic = CHUNK_MAGIC;
            vtx->shmchunks[i].type = chunk_t_shm;
            vtx->shmchunks[i].vtx = vtx;
            VTAILQ_INSERT_TAIL(&vtx->shmchunks_free,
                &vtx->shmchunks[i], list);
        }
        VTAILQ_INIT(&vtx->chunks);
        VTAILQ_INIT(&vtx->synth);
        vtx->c.magic = VSLC_VTX_MAGIC;
        vtx->c.vtx = vtx;
        vtx->c.cursor.priv_tbl = &vslc_vtx_tbl;
        vtx->c.cursor.priv_data = &vtx->c;
    }

    CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
    vtx->key.vxid = 0;
    vtx->t_start = VTIM_mono();
    vtx->flags = 0;
    vtx->type = VSL_t_unknown;
    vtx->reason = VSL_r_unknown;
    vtx->parent = NULL;
    vtx->n_child = 0;
    vtx->n_childready = 0;
    vtx->n_descend = 0;
    vtx->len = 0;
    AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);

    return (vtx);
}

/* Disuse a vtx and all its children, freeing any resources held. Free or
   cache the vtx for later use */
static void
vtx_retire(struct VSLQ *vslq, struct vtx **pvtx)
{
    struct vtx *vtx;
    struct vtx *child;
    struct synth *synth;
    struct chunk *chunk;

    AN(vslq);
    TAKE_OBJ_NOTNULL(vtx, pvtx, VTX_MAGIC);

    AN(vtx->flags & VTX_F_COMPLETE);
    AN(vtx->flags & VTX_F_READY);
    AZ(vtx->parent);

    while (!VTAILQ_EMPTY(&vtx->child)) {
        child = VTAILQ_FIRST(&vtx->child);
        assert(child->parent == vtx);
        AN(vtx->n_child);
        assert(vtx->n_descend >= child->n_descend + 1);
        VTAILQ_REMOVE(&vtx->child, child, list_child);
        child->parent = NULL;
        vtx->n_child--;
        vtx->n_descend -= child->n_descend + 1;
        vtx_retire(vslq, &child);
        AZ(child);
    }
    AZ(vtx->n_child);
    AZ(vtx->n_descend);
    vtx->n_childready = 0;
    // the remove rval gives no way to check if the element was present
    (void)VRBT_REMOVE(vtx_tree, &vslq->tree, &vtx->key);
    vtx->key.vxid = 0;
    vtx->flags = 0;

    while (!VTAILQ_EMPTY(&vtx->synth)) {
        synth = VTAILQ_FIRST(&vtx->synth);
        CHECK_OBJ_NOTNULL(synth, SYNTH_MAGIC);
        VTAILQ_REMOVE(&vtx->synth, synth, list);
        FREE_OBJ(synth);
    }

    while (!VTAILQ_EMPTY(&vtx->chunks)) {
        chunk = VTAILQ_FIRST(&vtx->chunks);
        CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
        VTAILQ_REMOVE(&vtx->chunks, chunk, list);
        if (chunk->type == chunk_t_shm) {
            VTAILQ_REMOVE(&vslq->shmrefs, chunk, shm.shmref);
            VTAILQ_INSERT_HEAD(&vtx->shmchunks_free, chunk, list);
        } else {
            assert(chunk->type == chunk_t_buf);
            chunk_freebuf(&chunk);
            AZ(chunk);
        }
    }
    vtx->len = 0;
    AN(vslq->n_outstanding);
    vslq->n_outstanding--;

    if (vslq->n_cache < VTX_CACHE) {
        VTAILQ_INSERT_HEAD(&vslq->cache, vtx, list_child);
        vslq->n_cache++;
    } else
        FREE_OBJ(vtx);

}

/* Lookup a vtx by vxid from the managed list */
static struct vtx *
vtx_lookup(const struct VSLQ *vslq, uint64_t vxid)
{
    struct vtx_key lkey, *key;
    struct vtx *vtx;

    AN(vslq);
    lkey.vxid = vxid;
    key = VRBT_FIND(vtx_tree, &vslq->tree, &lkey);
    if (key == NULL)
        return (NULL);
    CAST_OBJ_NOTNULL(vtx, (void *)key, VTX_MAGIC);
    return (vtx);
}

/* Insert a new vtx into the managed list */
static struct vtx *
vtx_add(struct VSLQ *vslq, uint64_t vxid)
{
    struct vtx *vtx;

    AN(vslq);
    vtx = vtx_new(vslq);
    AN(vtx);
    vtx->key.vxid = vxid;
    AZ(VRBT_INSERT(vtx_tree, &vslq->tree, &vtx->key));
    VTAILQ_INSERT_TAIL(&vslq->incomplete, vtx, list_vtx);
    vslq->n_outstanding++;
    return (vtx);
}

/* Mark a vtx complete, update child counters and if possible push it or
   its top parent to the ready state */
static void
vtx_mark_complete(struct VSLQ *vslq, struct vtx *vtx)
{

    AN(vslq);
    AN(vtx->flags & VTX_F_END);
    AZ(vtx->flags & VTX_F_COMPLETE);

    if (vtx->type == VSL_t_unknown)
        vtx_diag(vtx, "vtx of unknown type marked complete");

    vtx->flags |= VTX_F_COMPLETE;
    VTAILQ_REMOVE(&vslq->incomplete, vtx, list_vtx);

    while (1) {
        AZ(vtx->flags & VTX_F_READY);
        if (vtx->flags & VTX_F_COMPLETE &&
            vtx->n_child == vtx->n_childready)
            vtx->flags |= VTX_F_READY;
        else
            return;
        if (vtx->parent == NULL) {
            /* Top level vtx ready */
            VTAILQ_INSERT_TAIL(&vslq->ready, vtx, list_vtx);
            return;
        }
        vtx = vtx->parent;
        vtx->n_childready++;
        assert(vtx->n_child >= vtx->n_childready);
    }
}

/* Add a child to a parent, and update child counters */
static void
vtx_set_parent(struct vtx *parent, struct vtx *child)
{

    CHECK_OBJ_NOTNULL(parent, VTX_MAGIC);
    CHECK_OBJ_NOTNULL(child, VTX_MAGIC);
    assert(parent != child);
    AZ(parent->flags & VTX_F_COMPLETE);
    AZ(child->flags & VTX_F_COMPLETE);
    AZ(child->parent);
    child->parent = parent;
    VTAILQ_INSERT_TAIL(&parent->child, child, list_child);
    parent->n_child++;
    do
        parent->n_descend += 1 + child->n_descend;
    while ((parent = parent->parent) != NULL);
}

/* Parse a begin or link record. Returns the number of elements that were
   successfully parsed. */
static int
vtx_parse_link(const char *str, enum VSL_transaction_e *ptype,
    uint64_t *pvxid, enum VSL_reason_e *preason, uint64_t *psub)
{
    char type[16], reason[16];
    uintmax_t vxid, sub;
    int i;
    enum VSL_transaction_e et;
    enum VSL_reason_e er;

    AN(str);
    AN(ptype);
    AN(pvxid);
    AN(preason);

    i = sscanf(str, "%15s %ju %15s %ju", type, &vxid, reason, &sub);
    if (i < 1)
        return (0);

    /* transaction type */
    for (et = VSL_t_unknown; et < VSL_t__MAX; et++)
        if (!strcmp(type, vsl_t_names[et]))
            break;
    if (et >= VSL_t__MAX)
        et = VSL_t_unknown;
    *ptype = et;
    if (i == 1)
        return (1);

    /* vxid */
    assert((vxid & ~VSL_IDENTMASK) == 0);
    *pvxid = vxid;
    if (i == 2)
        return (2);

    /* transaction reason */
    for (er = VSL_r_unknown; er < VSL_r__MAX; er++)
        if (!strcmp(reason, vsl_r_names[er]))
            break;
    if (er >= VSL_r__MAX)
        er = VSL_r_unknown;
    *preason = er;
    if (i == 3)
        return (3);

    /* request sub-level */
    if (psub != NULL)
        *psub = sub;
    return (4);
}
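
/*
 * The Begin/Link payload parsed above has the form
 * "<type> <vxid> <reason> [<sub>]"; for example a record body like
 * "req 32773 rxreq" (values made up for illustration) parses to
 * VSL_t_req, vxid 32773 and VSL_r_rxreq, and the function returns 3.
 */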

/* Parse and process a begin record */
static int
vtx_scan_begin(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
{
    int i;
    enum VSL_transaction_e type;
    enum VSL_reason_e reason;
    uint64_t p_vxid;
    struct vtx *p_vtx;

    assert(VSL_TAG(ptr) == SLT_Begin);

    AZ(vtx->flags & VTX_F_READY);

    i = vtx_parse_link(VSL_CDATA(ptr), &type, &p_vxid, &reason, NULL);
    if (i < 3)
        return (vtx_diag_tag(vtx, ptr, "parse error"));
    if (type == VSL_t_unknown)
        (void)vtx_diag_tag(vtx, ptr, "unknown vxid type");

    /* Check/set vtx type */
    if (vtx->type != VSL_t_unknown && vtx->type != type)
        /* Type not matching the one previously set by a link
           record */
        (void)vtx_diag_tag(vtx, ptr, "type mismatch");
    vtx->type = type;
    vtx->reason = reason;

    if (p_vxid == 0)
        /* Zero means no parent */
        return (0);
    if (p_vxid == vtx->key.vxid)
        return (vtx_diag_tag(vtx, ptr, "link to self"));

    if (vslq->grouping == VSL_g_vxid)
        return (0); /* No links */
    if (vslq->grouping == VSL_g_request && vtx->type == VSL_t_req &&
        vtx->reason == VSL_r_rxreq)
        return (0); /* No links */

    if (vtx->parent != NULL) {
        if (vtx->parent->key.vxid != p_vxid) {
            /* This vtx already belongs to a different
               parent */
            return (vtx_diag_tag(vtx, ptr, "link mismatch"));
        } else
            /* Link already exists */
            return (0);
    }

    p_vtx = vtx_lookup(vslq, p_vxid);
    if (p_vtx == NULL) {
        /* Not seen parent yet. Create it. */
        p_vtx = vtx_add(vslq, p_vxid);
        AN(p_vtx);
    } else {
        CHECK_OBJ_NOTNULL(p_vtx, VTX_MAGIC);
        if (p_vtx->flags & VTX_F_COMPLETE)
            return (vtx_diag_tag(vtx, ptr, "link too late"));
    }

    /* Create link */
    vtx_set_parent(p_vtx, vtx);

    return (0);
}

/* Parse and process a link record */
static int
vtx_scan_link(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
{
    int i;
    enum VSL_transaction_e c_type;
    enum VSL_reason_e c_reason;
    uint64_t c_vxid;
    struct vtx *c_vtx;

    assert(VSL_TAG(ptr) == SLT_Link);

    AZ(vtx->flags & VTX_F_READY);

    i = vtx_parse_link(VSL_CDATA(ptr), &c_type, &c_vxid, &c_reason, NULL);
    if (i < 3)
        return (vtx_diag_tag(vtx, ptr, "parse error"));
    if (c_type == VSL_t_unknown)
        (void)vtx_diag_tag(vtx, ptr, "unknown vxid type");

    if (vslq->grouping == VSL_g_vxid)
        return (0); /* No links */
    if (vslq->grouping == VSL_g_request && vtx->type == VSL_t_sess)
        return (0); /* No links */

    if (c_vxid == 0)
        return (vtx_diag_tag(vtx, ptr, "illegal link vxid"));
    if (c_vxid == vtx->key.vxid)
        return (vtx_diag_tag(vtx, ptr, "link to self"));

    /* Lookup and check child vtx */
    c_vtx = vtx_lookup(vslq, c_vxid);
    if (c_vtx == NULL) {
        /* Child not seen before. Insert it and create link */
        c_vtx = vtx_add(vslq, c_vxid);
        AN(c_vtx);
        AZ(c_vtx->parent);
        c_vtx->type = c_type;
        c_vtx->reason = c_reason;
        vtx_set_parent(vtx, c_vtx);
        return (0);
    }

    CHECK_OBJ_NOTNULL(c_vtx, VTX_MAGIC);
    if (c_vtx->parent == vtx)
        /* Link already exists */
        return (0);
    if (c_vtx->parent != NULL && c_vtx->parent != vtx)
        return (vtx_diag_tag(vtx, ptr, "duplicate link"));
    if (c_vtx->flags & VTX_F_COMPLETE)
        return (vtx_diag_tag(vtx, ptr, "link too late"));
    if (c_vtx->type != VSL_t_unknown && c_vtx->type != c_type)
        (void)vtx_diag_tag(vtx, ptr, "type mismatch");

    c_vtx->type = c_type;
    c_vtx->reason = c_reason;
    vtx_set_parent(vtx, c_vtx);
    return (0);
}

/* Scan the records of a vtx, performing processing actions on specific
   records */
static void
vtx_scan(struct VSLQ *vslq, struct vtx *vtx)
{
    const uint32_t *ptr;
    enum VSL_tag_e tag;

    while (!(vtx->flags & VTX_F_COMPLETE) &&
        vslc_vtx_next(&vtx->c.cursor) == 1) {
        ptr = vtx->c.cursor.rec.ptr;
        if (VSL_ID(ptr) != vtx->key.vxid) {
            (void)vtx_diag_tag(vtx, ptr, "vxid mismatch");
            continue;
        }

        tag = VSL_TAG(ptr);
        assert(tag != SLT__Batch);

        switch (tag) {
        case SLT_Begin:
            if (vtx->flags & VTX_F_BEGIN)
                (void)vtx_diag_tag(vtx, ptr, "duplicate begin");
            else {
                (void)vtx_scan_begin(vslq, vtx, ptr);
                vtx->flags |= VTX_F_BEGIN;
            }
            break;

        case SLT_Link:
            (void)vtx_scan_link(vslq, vtx, ptr);
            break;

        case SLT_End:
            AZ(vtx->flags & VTX_F_END);
            vtx->flags |= VTX_F_END;
            vtx_mark_complete(vslq, vtx);
            break;

        default:
            break;
        }
    }
}

/* Force a vtx into complete status by synthing the necessary outstanding
   records */
static void
vtx_force(struct VSLQ *vslq, struct vtx *vtx, const char *reason)
{

    AZ(vtx->flags & VTX_F_COMPLETE);
    AZ(vtx->flags & VTX_F_READY);
    vtx_scan(vslq, vtx);
    if (!(vtx->flags & VTX_F_BEGIN))
        vtx_synth_rec(vtx, SLT_Begin, "%s %u synth",
            vsl_t_names[vtx->type], 0);
    vtx_diag(vtx, reason);
    if (!(vtx->flags & VTX_F_END))
        vtx_synth_rec(vtx, SLT_End, "synth");
    vtx_scan(vslq, vtx);
    AN(vtx->flags & VTX_F_COMPLETE);
}

static int
vslq_ratelimit(struct VSLQ *vslq)
{
    vtim_mono now;
    vtim_dur delta;

    CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
    CHECK_OBJ_NOTNULL(vslq->vsl, VSL_MAGIC);

    now = VTIM_mono();
    delta = now - vslq->last_use;
    vslq->credits += (delta / vslq->vsl->R_opt_p) * vslq->vsl->R_opt_l;
    vslq->credits = vmin_t(double, vslq->credits, vslq->vsl->R_opt_l);
    vslq->last_use = now;

    if (vslq->credits < 1.0)
        return (0);

    vslq->credits -= 1.0;
    return (1);
}
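
/*
 * Rate limiting is a token bucket: each call refills
 * credits += (delta / R_opt_p) * R_opt_l, capped at R_opt_l, and every
 * dispatched transaction group (or raw record) spends one credit; with
 * fewer than 1.0 credits the callback is skipped.
 */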

/* Build transaction array, do the query and callback. Returns 0 or the
   return value from func */
static int
vslq_callback(struct VSLQ *vslq, struct vtx *vtx, VSLQ_dispatch_f *func,
    void *priv)
{
    unsigned n = vtx->n_descend + 1;
    struct vtx *vtxs[n];
    struct VSL_transaction trans[n];
    struct VSL_transaction *ptrans[n + 1];
    unsigned i, j;

    AN(vslq);
    CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
    AN(vtx->flags & VTX_F_READY);
    AN(func);

    if (vslq->grouping == VSL_g_session &&
        vtx->type != VSL_t_sess)
        return (0);
    if (vslq->grouping == VSL_g_request &&
        vtx->type != VSL_t_req)
        return (0);

    /* Build transaction array */
    AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);
    vtxs[0] = vtx;
    trans[0].level = 1;
    trans[0].vxid = vtx->key.vxid;
    trans[0].vxid_parent = 0;
    trans[0].type = vtx->type;
    trans[0].reason = vtx->reason;
    trans[0].c = &vtx->c.cursor;
    i = 1;
    j = 0;
    while (j < i) {
        VTAILQ_FOREACH(vtx, &vtxs[j]->child, list_child) {
            assert(i < n);
            AN(vslc_vtx_reset(&vtx->c.cursor) == vsl_end);
            vtxs[i] = vtx;
            if (vtx->reason == VSL_r_restart)
                /* Restarts stay at the same level as parent */
                trans[i].level = trans[j].level;
            else
                trans[i].level = trans[j].level + 1;
            trans[i].vxid = vtx->key.vxid;
            trans[i].vxid_parent = trans[j].vxid;
            trans[i].type = vtx->type;
            trans[i].reason = vtx->reason;
            trans[i].c = &vtx->c.cursor;
            i++;
        }
        j++;
    }
    assert(i == n);

    /* Build pointer array */
    for (i = 0; i < n; i++)
        ptrans[i] = &trans[i];
    ptrans[i] = NULL;

    /* Query test goes here */
    if (vslq->query != NULL && !vslq_runquery(vslq->query, ptrans))
        return (0);

    if (vslq->vsl->R_opt_l != 0 && !vslq_ratelimit(vslq))
        return (0);

    /* Callback */
    return ((func)(vslq->vsl, ptrans, priv));
}
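
/*
 * The transaction array above is filled breadth-first from the top vtx,
 * so children always follow their parent; the level increases by one per
 * generation except for restarted requests (VSL_r_restart), which stay at
 * the parent's level. The ptrans array handed to the callback is
 * NULL-terminated.
 */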

/* Create a synthetic log record. The record will be inserted at the
   current cursor offset */
static void
vtx_synth_rec(struct vtx *vtx, unsigned tag, const char *fmt, ...)
{
    struct synth *synth, *it;
    va_list ap;
    char *buf;
    int l, buflen;
    uint64_t vxid;

    ALLOC_OBJ(synth, SYNTH_MAGIC);
    AN(synth);

    buf = VSL_DATA(synth->data);
    buflen = sizeof(synth->data) - VSL_BYTES(VSL_OVERHEAD);
    va_start(ap, fmt);
    l = vsnprintf(buf, buflen, fmt, ap);
    assert(l >= 0);
    va_end(ap);
    if (l > buflen - 1)
        l = buflen - 1;
    buf[l++] = '\0'; /* NUL-terminated */
    vxid = vtx->key.vxid;
    switch (vtx->type) {
    case VSL_t_req:
        vxid |= VSL_CLIENTMARKER;
        break;
    case VSL_t_bereq:
        vxid |= VSL_BACKENDMARKER;
        break;
    default:
        break;
    }
    synth->data[2] = vxid >> 32;
    synth->data[1] = vxid;
    synth->data[0] = (((tag & VSL_IDMASK) << VSL_IDSHIFT) |
        (VSL_VERSION_3 << VSL_VERSHIFT) | l);
    synth->offset = vtx->c.offset;

    VTAILQ_FOREACH_REVERSE(it, &vtx->synth, synthhead, list) {
        /* Make sure the synth list is sorted on offset */
        CHECK_OBJ_NOTNULL(it, SYNTH_MAGIC);
        if (synth->offset >= it->offset)
            break;
    }
    if (it != NULL)
        VTAILQ_INSERT_AFTER(&vtx->synth, it, synth, list);
    else
        VTAILQ_INSERT_HEAD(&vtx->synth, synth, list);

    /* Update cursor */
    CHECK_OBJ_ORNULL(vtx->c.synth, SYNTH_MAGIC);
    if (vtx->c.synth == NULL || vtx->c.synth->offset > synth->offset)
        vtx->c.synth = synth;
}
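
/*
 * Synthetic records use the same word layout as records read from shm:
 * word 0 packs the tag, version (VSL_VERSION_3) and payload length, and
 * words 1-2 hold the 64-bit vxid with the client or backend marker bit
 * set according to the transaction type.
 */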

/* Add a diagnostic SLT_VSL synth record to the vtx. */
static int
vtx_diag(struct vtx *vtx, const char *msg)
{

    vtx_synth_rec(vtx, SLT_VSL, msg);
    return (-1);
}

/* Add a SLT_VSL diag synth record to the vtx. Takes an offending record
   that will be included in the log record */
static int
vtx_diag_tag(struct vtx *vtx, const uint32_t *ptr, const char *reason)
{

    vtx_synth_rec(vtx, SLT_VSL, "%s (%ju:%s \"%.*s\")", reason, VSL_ID(ptr),
        VSL_tags[VSL_TAG(ptr)], (int)VSL_LEN(ptr), VSL_CDATA(ptr));
    return (-1);
}

struct VSLQ *
VSLQ_New(struct VSL_data *vsl, struct VSL_cursor **cp,
    enum VSL_grouping_e grouping, const char *querystring)
{
    struct vslq_query *query;
    struct VSLQ *vslq;

    CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
    if (grouping >= VSL_g__MAX) {
        (void)vsl_diag(vsl, "Illegal query grouping");
        return (NULL);
    }
    if (querystring != NULL) {
        query = vslq_newquery(vsl, grouping, querystring);
        if (query == NULL)
            return (NULL);
    } else
        query = NULL;

    ALLOC_OBJ(vslq, VSLQ_MAGIC);
    AN(vslq);
    vslq->vsl = vsl;
    if (cp != NULL) {
        vslq->c = *cp;
        *cp = NULL;
    }
    vslq->grouping = grouping;
    vslq->query = query;
    if (vslq->vsl->R_opt_l != 0) {
        vslq->last_use = VTIM_mono();
        vslq->credits = 1;
    }

    /* Setup normal mode */
    VRBT_INIT(&vslq->tree);
    VTAILQ_INIT(&vslq->ready);
    VTAILQ_INIT(&vslq->incomplete);
    VTAILQ_INIT(&vslq->shmrefs);
    VTAILQ_INIT(&vslq->cache);

    /* Setup raw mode */
    vslq->raw.c.magic = VSLC_RAW_MAGIC;
    vslq->raw.c.cursor.priv_tbl = &vslc_raw_tbl;
    vslq->raw.c.cursor.priv_data = &vslq->raw.c;
    vslq->raw.trans.level = 0;
    vslq->raw.trans.type = VSL_t_raw;
    vslq->raw.trans.reason = VSL_r_unknown;
    vslq->raw.trans.c = &vslq->raw.c.cursor;
    vslq->raw.ptrans[0] = &vslq->raw.trans;
    vslq->raw.ptrans[1] = NULL;

    return (vslq);
}
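
/*
 * A minimal usage sketch (illustration only, not part of this file): the
 * setup helpers referenced below (VSM_New, VSM_Attach, VSL_New,
 * VSL_CursorVSM and their option flags) are assumed to come from the
 * public VSM/VSL API headers.
 *
 *     struct vsm *vsm = VSM_New();
 *     struct VSL_data *vsl = VSL_New();
 *     struct VSL_cursor *c;
 *     struct VSLQ *vslq;
 *     int i;
 *
 *     if (VSM_Attach(vsm, STDERR_FILENO))
 *         exit(1);
 *     c = VSL_CursorVSM(vsl, vsm, VSL_COPT_BATCH | VSL_COPT_TAIL);
 *     vslq = VSLQ_New(vsl, &c, VSL_g_vxid, NULL);
 *     do {
 *         // my_dispatch_cb is a user-supplied VSLQ_dispatch_f
 *         i = VSLQ_Dispatch(vslq, my_dispatch_cb, NULL);
 *         if (i == 0)
 *             usleep(10000);   // end of log for now
 *     } while (i >= 0);
 *     VSLQ_Delete(&vslq);
 */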

void
VSLQ_Delete(struct VSLQ **pvslq)
{
    struct VSLQ *vslq;
    struct vtx *vtx;

    TAKE_OBJ_NOTNULL(vslq, pvslq, VSLQ_MAGIC);

    (void)VSLQ_Flush(vslq, NULL, NULL);
    AZ(vslq->n_outstanding);

    if (vslq->c != NULL) {
        VSL_DeleteCursor(vslq->c);
        vslq->c = NULL;
    }

    if (vslq->query != NULL)
        vslq_deletequery(&vslq->query);
    AZ(vslq->query);

    while (!VTAILQ_EMPTY(&vslq->cache)) {
        AN(vslq->n_cache);
        vtx = VTAILQ_FIRST(&vslq->cache);
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
        VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
        vslq->n_cache--;
        FREE_OBJ(vtx);
    }

    FREE_OBJ(vslq);
}

void
VSLQ_SetCursor(struct VSLQ *vslq, struct VSL_cursor **cp)
{

    CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);

    if (vslq->c != NULL) {
        (void)VSLQ_Flush(vslq, NULL, NULL);
        AZ(vslq->n_outstanding);
        VSL_DeleteCursor(vslq->c);
        vslq->c = NULL;
    }

    if (cp != NULL) {
        AN(*cp);
        vslq->c = *cp;
        *cp = NULL;
    }
}

/* Regard each log line as a single transaction, feed it through the query
   and do the callback */
static int
vslq_raw(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
    enum vsl_status r = vsl_more;
    int i;

    assert(vslq->grouping == VSL_g_raw);

    assert(vslq->raw.offset <= vslq->raw.len);
    do {
        if (vslq->raw.offset == vslq->raw.len) {
            r = VSL_Next(vslq->c);
            if (r != vsl_more)
                return (r);
            AN(vslq->c->rec.ptr);
            vslq->raw.start = vslq->c->rec;
            if (VSL_TAG(vslq->c->rec.ptr) == SLT__Batch)
                vslq->raw.len = VSL_END(vslq->c->rec.ptr,
                    VSL_BATCHLEN(vslq->c->rec.ptr)) -
                    vslq->c->rec.ptr;
            else
                vslq->raw.len = VSL_NEXT(vslq->raw.start.ptr) -
                    vslq->raw.start.ptr;
            assert(vslq->raw.len > 0);
            vslq->raw.offset = 0;
        }

        vslq->raw.c.ptr = vslq->raw.start.ptr + vslq->raw.offset;
        vslq->raw.c.cursor.rec.ptr = NULL;
        vslq->raw.trans.vxid = VSL_ID(vslq->raw.c.ptr);
        vslq->raw.offset += VSL_NEXT(vslq->raw.c.ptr) - vslq->raw.c.ptr;
    } while (VSL_TAG(vslq->raw.c.ptr) == SLT__Batch);

    assert (r == vsl_more);

    if (func == NULL)
        return (r);

    if (vslq->query != NULL &&
        !vslq_runquery(vslq->query, vslq->raw.ptrans))
        return (r);

    if (vslq->vsl->R_opt_l != 0 && !vslq_ratelimit(vslq))
        return (r);

    i = (func)(vslq->vsl, vslq->raw.ptrans, priv);
    if (i)
        return (i);

    return (r);
}
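
/*
 * In raw grouping every record is dispatched on its own: SLT__Batch
 * containers coming off the cursor are unpacked and their member records
 * fed to the query and callback one at a time.
 */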

/* Check the beginning of the shmref list, and buffer refs that are at
 * warning level.
 */
static enum vsl_status
vslq_shmref_check(struct VSLQ *vslq)
{
    struct chunk *chunk;
    enum vsl_check i;

    while ((chunk = VTAILQ_FIRST(&vslq->shmrefs)) != NULL) {
        CHECK_OBJ_NOTNULL(chunk, CHUNK_MAGIC);
        assert(chunk->type == chunk_t_shm);
        i = VSL_Check(vslq->c, &chunk->shm.start);
        switch (i) {
        case vsl_check_valid:
            /* First on list is OK, refs behind it must also
               be OK */
            return (vsl_more);
        case vsl_check_warn:
            /* Buffer this chunk */
            chunk_shm_to_buf(vslq, chunk);
            break;
        default:
            /* Too late to buffer */
            return (vsl_e_overrun);
        }
    }

    return (vsl_more);
}

static unsigned
vslq_candidate(struct VSLQ *vslq, const uint32_t *ptr)
{
    enum VSL_transaction_e type;
    enum VSL_reason_e reason;
    struct VSL_data *vsl;
    enum VSL_tag_e tag;
    uint64_t p_vxid, sub;
    int i;

    CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
    AN(ptr);

    assert(vslq->grouping != VSL_g_raw);
    if (vslq->grouping == VSL_g_session)
        return (1); /* All are needed */

    vsl = vslq->vsl;
    CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
    if (vslq->grouping == VSL_g_vxid) {
        if (!vsl->c_opt && !vsl->b_opt)
            AZ(vsl->E_opt);
        else if (!vsl->b_opt && !VSL_CLIENT(ptr))
            return (0);
        else if (!vsl->c_opt && !VSL_BACKEND(ptr))
            return (0);
        /* Need to parse the Begin tag - fallthrough to below */
    }

    tag = VSL_TAG(ptr);
    assert(tag == SLT_Begin);
    i = vtx_parse_link(VSL_CDATA(ptr), &type, &p_vxid, &reason, &sub);
    if (i < 3 || type == VSL_t_unknown)
        return (0);

    if (vslq->grouping == VSL_g_request && type == VSL_t_sess)
        return (0);

    if (vslq->grouping == VSL_g_vxid && i > 3 && sub > 0 && !vsl->E_opt)
        return (0);

    return (1);
}

/* Process next input record */
static enum vsl_status
vslq_next(struct VSLQ *vslq)
{
    const uint32_t *ptr;
    struct VSL_cursor *c;
    enum vsl_status r;
    enum VSL_tag_e tag;
    ssize_t len;
    uint64_t vxid;
    unsigned keep;
    struct vtx *vtx;

    c = vslq->c;
    r = VSL_Next(c);
    if (r != vsl_more)
        return (r);

    assert (r == vsl_more);

    tag = (enum VSL_tag_e)VSL_TAG(c->rec.ptr);
    if (tag == SLT__Batch) {
        vxid = VSL_BATCHID(c->rec.ptr);
        len = VSL_END(c->rec.ptr, VSL_BATCHLEN(c->rec.ptr)) -
            c->rec.ptr;
        if (len == 0)
            return (r);
        ptr = VSL_NEXT(c->rec.ptr);
        tag = (enum VSL_tag_e)VSL_TAG(ptr);
    } else {
        vxid = VSL_ID(c->rec.ptr);
        len = VSL_NEXT(c->rec.ptr) - c->rec.ptr;
        ptr = c->rec.ptr;
    }
    assert(len > 0);
    if (vxid == 0)
        /* Skip non-transactional records */
        return (r);

    vtx = vtx_lookup(vslq, vxid);
    keep = tag != SLT_Begin || vslq_candidate(vslq, ptr);
    if (vtx == NULL && tag == SLT_Begin && keep) {
        vtx = vtx_add(vslq, vxid);
        AN(vtx);
    }
    if (vtx != NULL) {
        AN(keep);
        r = vtx_append(vslq, vtx, &c->rec, len);
        if (r == vsl_more)
            vtx_scan(vslq, vtx);
    }

    return (r);
}

/* Test query and report any ready transactions */
static int
vslq_process_ready(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
    struct vtx *vtx;
    int i = 0;

    AN(vslq);

    while (!VTAILQ_EMPTY(&vslq->ready)) {
        vtx = VTAILQ_FIRST(&vslq->ready);
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
        VTAILQ_REMOVE(&vslq->ready, vtx, list_vtx);
        AN(vtx->flags & VTX_F_READY);
        if (func != NULL)
            i = vslq_callback(vslq, vtx, func, priv);
        vtx_retire(vslq, &vtx);
        AZ(vtx);
        if (i)
            return (i);
    }

    return (0);
}

/* Process the input cursor, calling the callback function on matching
   transaction sets */
int
VSLQ_Dispatch(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
    enum vsl_status r;
    int i;
    double now;
    struct vtx *vtx;

    CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);

    /* Check that we have a cursor */
    if (vslq->c == NULL)
        return (vsl_e_abandon);

    if (vslq->grouping == VSL_g_raw)
        return (vslq_raw(vslq, func, priv));

    /* Process next cursor input */
    r = vslq_next(vslq);
    if (r != vsl_more)
        /* At end of log or cursor reports error condition */
        return (r);

    /* Check shmref list and buffer if necessary */
    r = vslq_shmref_check(vslq);
    if (r != vsl_more)
        /* Buffering of shm ref failed */
        return (r);

    assert (r == vsl_more);

    /* Check vtx timeout */
    now = VTIM_mono();
    while (!VTAILQ_EMPTY(&vslq->incomplete)) {
        vtx = VTAILQ_FIRST(&vslq->incomplete);
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
        if (now - vtx->t_start < vslq->vsl->T_opt)
            break;
        vtx_force(vslq, vtx, "timeout");
        AN(vtx->flags & VTX_F_COMPLETE);
    }

    /* Check store limit */
    while (vslq->n_outstanding > vslq->vsl->L_opt &&
        !(VTAILQ_EMPTY(&vslq->incomplete))) {
        vtx = VTAILQ_FIRST(&vslq->incomplete);
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
        vtx_force(vslq, vtx, "store overflow");
        AN(vtx->flags & VTX_F_COMPLETE);
        i = vslq_process_ready(vslq, func, priv);
        if (i)
            /* User return code */
            return (i);
    }

    /* Check ready list */
    if (!VTAILQ_EMPTY(&vslq->ready)) {
        i = vslq_process_ready(vslq, func, priv);
        if (i)
            /* User return code */
            return (i);
    }

    return (vsl_more);
}
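
/*
 * VSLQ_Dispatch() returns vsl_more when it should be called again right
 * away, vsl_end when the cursor currently has nothing more to offer, a
 * negative vsl_status (e.g. vsl_e_abandon, vsl_e_overrun) on cursor
 * errors, or a non-zero value passed through from the dispatch callback.
 */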

/* Flush any incomplete vtx held on to. Do callbacks if func != NULL */
int
VSLQ_Flush(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
    struct vtx *vtx;

    CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);

    while (!VTAILQ_EMPTY(&vslq->incomplete)) {
        vtx = VTAILQ_FIRST(&vslq->incomplete);
        CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
        AZ(vtx->flags & VTX_F_COMPLETE);
        vtx_force(vslq, vtx, "flush");
    }

    return (vslq_process_ready(vslq, func, priv));
}