varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "common/heritage.h"

#include "vend.h"
#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"
#include "vsmw.h"

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsm_mtx;

static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;

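/*--------------------------------------------------------------------
 * Assert that a vsl_log has been set up with VSL_Setup() before use.
 */
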
static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}

/*--------------------------------------------------------------------
 * Lay down a header field, and return a pointer to the next record
 */

static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, uint32_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

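        /* Record layout: p[0] = (tag << 24) | length, p[1] = vxid,
           payload starts at p + 2 */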
        p[1] = vxid;
        p[0] = ((((unsigned)tag & 0xff) << 24) | len);
        return (VSL_END(p, len));
}

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

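        /* Try the lock first so that contention can be counted in shm_cont */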
        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                AZ(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

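        /* Record new segment offsets for each segment boundary the
           reservation has crossed */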
        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        AZ(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, uint32_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + 2, b, len);

        /*
         * vsl_hdr() writes p[1] again, but we want to make sure it
         * has hit memory because we work on the live buffer here.
         */
        p[1] = vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume
 * NB: since it significantly adds to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

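        /* Fast path: no conversion specifiers in fmt, log the string as-is */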
        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                if (n > mlen - 1)
                        n = mlen - 1;
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }

}

void
VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}

/*--------------------------------------------------------------------*/

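/*
 * Flush the accumulated records from a vsl_log buffer into the shared
 * log as a single SLT__Batch record.
 */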
void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + 2, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << 24) | 0);
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
        unsigned l, mlen;
        char *p;

        vsl_sanity(vsl);
        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        l = Tlen(t);
        if (l > mlen - 1)
                l = mlen - 1;

        assert(vsl->wlp < vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, l + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, l + 1) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, t.b, l);
        p[l++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

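        /* With the syncvsl debug flag set, flush after every record */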
        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p;
        const char *u, *f;
        unsigned n, mlen;
        txt t;

        vsl_sanity(vsl);
        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        f = NULL;
        for (u = fmt; *u != '\0'; u++)
                if (*u == '%')
                        f = u;
        if (f == NULL) {
                t.b = TRUST_ME(fmt);
                t.e = TRUST_ME(u);
                VSLbt(vsl, tag, t);
                return;
        }

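        /* Special-case "%s" to avoid the vsnprintf() round trip */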
        if (!strcmp(fmt, "%s")) {
                p = va_arg(ap, char *);
                t.b = p;
                t.e = strchr(p, '\0');
                VSLbt(vsl, tag, t);
                return;
        }

        mlen = cache_param->vsl_reclen;

        /* Flush if we cannot fit a full size record */
        if (VSL_END(vsl->wlp, mlen + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);

        p = VSL_DATA(vsl->wlp);
        n = vsnprintf(p, mlen, fmt, ap);
        if (n > mlen - 1)
                n = mlen - 1;   /* we truncate long fields */
        p[n++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, n, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}

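/*--------------------------------------------------------------------
 * Log an SLT_Timestamp record: absolute time, time since the first
 * timestamp, and time since the previous one; *pprev is updated.
 */
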
void
VSLb_ts(struct vsl_log *vsl, const char *event, double first, double *pprev,
    double now)
{

        /* XXX: Make an option to turn off some unnecessary timestamp
           logging. This must be done carefully because some functions
           (e.g. V1L_Open) take the last timestamp as their initial
           value for timeout calculation. */
        vsl_sanity(vsl);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: %.6f %.6f %.6f",
            event, now, now - first, now - *pprev);
        *pprev = now;
}

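/*--------------------------------------------------------------------
 * Log binary data as a hex string; if it does not fit in a record the
 * data is truncated and a '-' is appended.
 */
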
void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        char *p;
        const uint8_t *pp = ptr;
        int suff = 0;
        size_t tl, ll;

        assert(len >= 0);
        AN(pp);
        if (vsl_tag_is_masked(tag))
                return;
        vsl_sanity(vsl);
        tl = len * 2 + 1;
        if (tl > cache_param->vsl_reclen) {
                len = (cache_param->vsl_reclen - 2) / 2;
                tl = len * 2 + 2;
                suff = 1;
        }
        if (VSL_END(vsl->wlp, tl) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, tl) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        for (ll = 0; ll < len; ll++) {
                assert(snprintf(p, 3, "%02x", *pp) == 2);
                pp++;
                p += 2;
        }
        if (suff)
                *p++ = '-';
        *p = '\0';
        vsl->wlp = vsl_hdr(tag, vsl->wlp, tl, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;
}

/*--------------------------------------------------------------------
 * Setup a VSL buffer, allocate space if none provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = 0;
        vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

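/*
 * Switch a vsl_log to a new vxid: emit a Link record under the old
 * vxid, end it, then emit a Begin record under the new vxid.
 */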
void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, uint32_t vxid)
{
        uint32_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %u %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %u %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

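/* Write the End record for the current vxid, flush and detach the log */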
void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        AN(vsl->wid);
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = 0;
}

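/* Serialize VSC (counter) segment operations; installed as the
   vsc_lock/vsc_unlock hooks in VSM_Init() */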
static void
vsm_vsc_lock(void)
{
        AZ(pthread_mutex_lock(&vsm_mtx));
}

static void
vsm_vsc_unlock(void)
{
        AZ(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

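/*
 * Initialize the VSL/VSM layer: create the main counter set, allocate
 * the shared VSL segment and set up its segment table and markers.
 */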
void
VSM_Init(void)
{
        int i;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        AZ(pthread_mutex_init(&vsl_mtx, NULL));
        AZ(pthread_mutex_init(&vsm_mtx, NULL));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;

        VSC_C_main = VSC_main_New("");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Make segment_n always overflow on first log wrap to make any
           problems with regard to readers on that event visible */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (i = 1; i < VSL_SEGMENTS; i++)
                vsl_head->offset[i] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}