varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;
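
/*
 * A rough summary of how the variables above are used in the rest of
 * this file: vsl_head points at the VSL_CLASS shared memory segment
 * allocated in VSM_Init(), vsl_ptr is the next write position inside
 * vsl_head->log, vsl_end is the first word past the usable log space,
 * vsl_segsize is the per-segment size in uint32_t words, and
 * vsl_segment_n is an ever-increasing (and deliberately wrapping)
 * segment counter whose value modulo VSL_SEGMENTS selects the current
 * offset[] slot.
 */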

static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}
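
/*
 * Illustrative example of the lookup above (the numeric tag value is
 * hypothetical, it depends on the tag table): for a tag whose value
 * is 10, bm is advanced by 10 >> 3 == 1 byte and b becomes
 * 0x80 >> (10 & 7) == 0x20, so the tag is considered masked iff bit
 * 0x20 is set in cache_param->vsl_mask[1].
 */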

/*--------------------------------------------------------------------
 * Lay down a header field, and return a pointer to the next record
 */

static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, uint32_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

        p[1] = vxid;
        p[0] = ((((unsigned)tag & 0xff) << 24) | len);
        return (VSL_END(p, len));
}
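
/*
 * A minimal sketch of the record layout vsl_hdr() produces, pieced
 * together from the code above and from how vslr()/VSLbt() fill in
 * the payload (the exact rounding done by VSL_END() is assumed here):
 *
 *      p[0]   = ((unsigned)tag << 24) | payload length in bytes
 *      p[1]   = vxid of the transaction (0 when there is none)
 *      p + 2  = start of the payload, i.e. VSL_DATA(p)
 *
 * VSL_END(p, len) is taken to round the payload up to whole uint32_t
 * words and return the address where the next record starts.
 */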

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                AZ(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        AZ(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}
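
/*
 * In short: vsl_get() reserves room for the two header words plus len
 * payload bytes, advances vsl_ptr past the reservation and plants a
 * fresh VSL_ENDMARKER after it.  Callers are expected to fill in the
 * payload first and write the header word last (as vslr() and
 * VSL_Flush() below do), so readers never see a header for a record
 * whose body is still incomplete.
 */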

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, uint32_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + 2, b, len);

        /*
         * vsl_hdr() writes p[1] again, but we want to make sure it
         * has hit memory because we work on the live buffer here.
         */
        p[1] = vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume,
 * NB: since it significantly adds to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                if (n > mlen - 1)
                        n = mlen - 1;
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }

}

void
VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}
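
/*
 * Sketch of a typical, purely illustrative call into the unbuffered
 * interface above; SLT_Debug is an existing tag and a vxid of 0 just
 * means the record is not tied to any transaction:
 *
 *      VSL(SLT_Debug, 0, "acceptor %s: %s", "a0", "ready");
 */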

/*--------------------------------------------------------------------*/

void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + 2, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << 24) | 0);
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}
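
/*
 * The flushed buffer is published as one SLT__Batch container record:
 * p[1] carries the byte count of the batched records and the header
 * word is written only after the VWMB(), mirroring the ordering used
 * in vslr() above.
 */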

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
        unsigned l, mlen;
        char *p;

        vsl_sanity(vsl);
        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        l = Tlen(t);
        if (l > mlen - 1)
                l = mlen - 1;

        assert(vsl->wlp < vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, l + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, l + 1) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, t.b, l);
        p[l++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p;
        const char *u, *f;
        unsigned n, mlen;
        txt t;

        vsl_sanity(vsl);
        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        f = NULL;
        for (u = fmt; *u != '\0'; u++)
                if (*u == '%')
                        f = u;
        if (f == NULL) {
                t.b = TRUST_ME(fmt);
                t.e = TRUST_ME(u);
                VSLbt(vsl, tag, t);
                return;
        }

        if (!strcmp(fmt, "%s")) {
                p = va_arg(ap, char *);
                t.b = p;
                t.e = strchr(p, '\0');
                VSLbt(vsl, tag, t);
                return;
        }

        mlen = cache_param->vsl_reclen;

        /* Flush if we cannot fit a full size record */
        if (VSL_END(vsl->wlp, mlen + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);

        p = VSL_DATA(vsl->wlp);
        n = vsnprintf(p, mlen, fmt, ap);
        if (n > mlen - 1)
                n = mlen - 1;   /* we truncate long fields */
        p[n++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, n, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}
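
/*
 * Illustrative (hypothetical) use of the buffered interface; vsl is
 * any struct vsl_log previously prepared with VSL_Setup() and given a
 * wid, for example the per-transaction log of a worker:
 *
 *      VSLb(vsl, SLT_Debug, "copied %zu bytes", (size_t)sz);
 */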

void
VSLb_ts(struct vsl_log *vsl, const char *event, double first, double *pprev,
    double now)
{

        /* XXX: Make an option to turn off some unnecessary timestamp
           logging. This must be done carefully because some functions
           (e.g. V1L_Open) take the last timestamp as their initial
           value for timeout calculation. */
        vsl_sanity(vsl);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: %.6f %.6f %.6f",
            event, now, now - first, now - *pprev);
        *pprev = now;
}
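
/*
 * Hypothetical timestamp chain; VTIM_real() is assumed to be the
 * usual wall-clock source.  Each call logs the absolute time, the
 * time since 'start' and the time since the previous event, then
 * advances *pprev for the next call:
 *
 *      double start, prev, now;
 *
 *      start = prev = VTIM_real();
 *      ...
 *      now = VTIM_real();
 *      VSLb_ts(vsl, "Process", start, &prev, now);
 */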

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        unsigned mlen;
        char *p;

        vsl_sanity(vsl);
        AN(ptr);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        assert(vsl->wlp < vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, len) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, len) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, ptr, len);
        vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * Set up a VSL buffer, allocate space if none provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = 0;
        vsl_sanity(vsl);
}
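
/*
 * Two hypothetical ways a caller might set up a log buffer, based on
 * the code above: hand in preallocated storage, or pass NULL and let
 * VSL_Setup() malloc() cache_param->vsl_buffer bytes itself:
 *
 *      struct vsl_log vsl;
 *      uint32_t space[1024];
 *
 *      VSL_Setup(&vsl, space, sizeof space);
 *      VSL_Setup(&vsl, NULL, 0);
 */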

/*--------------------------------------------------------------------*/

void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, uint32_t vxid)
{
        uint32_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %u %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %u %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        AN(vsl->wid);
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = 0;
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
        AZ(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
        AZ(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
        AZ(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
        AZ(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

void
VSM_Init(void)
{
        int i;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        AZ(pthread_mutex_init(&vsl_mtx, NULL));
        AZ(pthread_mutex_init(&vsc_mtx, NULL));
        AZ(pthread_mutex_init(&vsm_mtx, NULL));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;
        vsmw_lock = vsm_vsmw_lock;
        vsmw_unlock = vsm_vsmw_unlock;

        VSC_C_main = VSC_main_New(NULL, NULL, "");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Make segment_n always overflow on the first log wrap, so that
           any problems readers may have with that event become visible */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (i = 1; i < VSL_SEGMENTS; i++)
                vsl_head->offset[i] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}