varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

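/*
 * Shared memory log state: vsl_head points at the VSL_head structure at
 * the start of the VSL_CLASS VSM allocation, with the log records
 * following in vsl_head->log.  The record space is divided into
 * VSL_SEGMENTS segments of vsl_segsize 32-bit words each; vsl_ptr is the
 * current write position, and vsl_segment_n is a continuously incremented
 * segment counter that readers can compare against to notice when the
 * writer has lapped them.
 */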
static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;

static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}

/*--------------------------------------------------------------------
 * Lay down the header fields, and return a pointer to the next record
 */

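/*
 * A record is two 32-bit header words followed by the payload: p[0]
 * carries the tag in the top byte and the payload length in the low
 * bits, p[1] carries the vxid.  VSL_END() then advances past the header
 * and the payload, rounded up to whole 32-bit words.
 */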
static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, uint32_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

        p[1] = vxid;
        p[0] = ((((unsigned)tag & 0xff) << 24) | len);
        return (VSL_END(p, len));
}

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                AZ(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        AZ(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, uint32_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + 2, b, len);

        /*
         * vsl_hdr() writes p[1] again, but we want to make sure it
         * has hit memory because we work on the live buffer here.
         */
        p[1] = vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume,
 * NB: since it significantly adds to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                if (n > mlen - 1)
                        n = mlen - 1;
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }

}

void
VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}

/*--------------------------------------------------------------------*/

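/*
 * Flush the records buffered in *vsl into the shared log, copying them
 * under a single SLT__Batch header so the vsl_mtx is only taken once
 * for the whole batch.
 */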
void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + 2, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << 24));
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
        unsigned l, mlen;
        char *p;

        vsl_sanity(vsl);
        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        l = Tlen(t);
        if (l > mlen - 1)
                l = mlen - 1;

        assert(vsl->wlp < vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, l + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, l + 1) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, t.b, l);
        p[l++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p;
        const char *u, *f;
        unsigned n, mlen;
        txt t;

        vsl_sanity(vsl);
        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        f = NULL;
        for (u = fmt; *u != '\0'; u++)
                if (*u == '%')
                        f = u;
        if (f == NULL) {
                t.b = TRUST_ME(fmt);
                t.e = TRUST_ME(u);
                VSLbt(vsl, tag, t);
                return;
        }

        if (!strcmp(fmt, "%s")) {
                p = va_arg(ap, char *);
                t.b = p;
                t.e = strchr(p, '\0');
                VSLbt(vsl, tag, t);
                return;
        }

        mlen = cache_param->vsl_reclen;

        /* Flush if we cannot fit a full size record */
        if (VSL_END(vsl->wlp, mlen + 1) >= vsl->wle)
                VSL_Flush(vsl, 1);

        p = VSL_DATA(vsl->wlp);
        n = vsnprintf(p, mlen, fmt, ap);
        if (n > mlen - 1)
                n = mlen - 1;   /* we truncate long fields */
        p[n++] = '\0';          /* NUL-terminated */
        vsl->wlp = vsl_hdr(tag, vsl->wlp, n, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}

void
VSLb_ts(struct vsl_log *vsl, const char *event, double first, double *pprev,
    double now)
{

        /* XXX: Make an option to turn off some unnecessary timestamp
           logging. This must be done carefully because some functions
           (e.g. V1L_Open) take the last timestamp as their initial
           value for timeout calculation. */
        vsl_sanity(vsl);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: %.6f %.6f %.6f",
            event, now, now - first, now - *pprev);
        *pprev = now;
}

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        unsigned mlen;
        char *p;

        vsl_sanity(vsl);
        AN(ptr);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        assert(vsl->wlp < vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, len) >= vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, len) < vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, ptr, len);
        vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
        assert(vsl->wlp < vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * Set up a VSL buffer, allocating space if none is provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = 0;
        vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

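/*
 * Change the id of an open VSL buffer: log a Link record pointing at
 * the new vxid, end the current transaction, then begin a new one
 * under vxid with a Begin record that refers back to the old id.
 */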
void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, uint32_t vxid)
{
        uint32_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %u %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %u %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

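/*
 * End the transaction in *vsl: emit an empty SLT_End record, flush the
 * buffer to shared memory and clear the id.
 */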
void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        AN(vsl->wid);
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = 0;
}

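/*
 * Lock callbacks handed to the VSC and VSMW subsystems; they are wired
 * up in VSM_Init() below.
 */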
static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
        AZ(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
        AZ(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
        AZ(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
        AZ(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

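/*
 * Initialize the shared memory machinery: the private mutexes, the
 * VSC/VSMW lock callbacks, the main counter block and the shared log
 * head itself.
 */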
void
VSM_Init(void)
{
        int i;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        AZ(pthread_mutex_init(&vsl_mtx, NULL));
        AZ(pthread_mutex_init(&vsc_mtx, NULL));
        AZ(pthread_mutex_init(&vsm_mtx, NULL));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;
        vsmw_lock = vsm_vsmw_lock;
        vsmw_unlock = vsm_vsmw_unlock;

        VSC_C_main = VSC_main_New(NULL, NULL, "");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Make segment_n always overflow on first log wrap to make any
           problems with regard to readers on that event visible */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (i = 1; i < VSL_SEGMENTS; i++)
                vsl_head->offset[i] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}