varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* ------------------------------------------------------------
 * strands helpers - move elsewhere?
 */

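/*
 * A strands is VRT's string-as-fragment-list representation: s->p holds
 * s->n pointers, any of which may be NULL or empty and is then skipped.
 */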
static unsigned
strands_len(const struct strands *s)
{
        unsigned r = 0;
        int i;

        for (i = 0; i < s->n; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                r += strlen(s->p[i]);
        }

        return (r);
}

/*
 * Like VRT_Strands(), but truncates instead of failing at end of buffer.
 *
 * Returns the number of bytes written, including the terminating NUL.
 */
static unsigned
strands_cat(char *buf, unsigned bufl, const struct strands *s)
{
        unsigned l = 0, ll;
        int i;

        /* NUL-terminated */
        assert(bufl > 0);
        bufl--;

        for (i = 0; i < s->n && bufl > 0; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                ll = vmin_t(unsigned, strlen(s->p[i]), bufl);
                memcpy(buf, s->p[i], ll);
                l += ll;
                buf += ll;
                bufl -= ll;
        }
        *buf = '\0';    /* NUL-terminated */
        return (l + 1);
}

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

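/*
 * Writer-side view of the shared VSL ring: vsl_head is the VSL_head in
 * shared memory, vsl_ptr is the current write position, vsl_end marks the
 * end of the log area, and vsl_segment_n/vsl_segsize track the segment
 * bookkeeping published to readers (see VSM_Init() below).
 */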
static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;

static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
        assert(vsl->wlb <= vsl->wlp);
        assert(vsl->wlp <= vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */

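/*
 * One bit per tag: the byte index is tag / 8, with the most significant
 * bit of each byte covering the lowest tag number.  A non-zero return
 * means the tag is masked out and must not be logged.
 */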
static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}

int
VSL_tag_is_masked(enum VSL_tag_e tag)
{
        return (vsl_tag_is_masked(tag));
}

/*--------------------------------------------------------------------
 * Lay down the header fields, and return a pointer to the next record
 */

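/*
 * Record layout: p[0] carries the tag, version and length, p[1] and p[2]
 * the low and high 32 bits of the vxid; the payload follows the
 * VSL_OVERHEAD header words and VSL_END() points at the next record slot.
 */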
static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, vxid_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        p[0] = (((unsigned)tag & VSL_IDMASK) << VSL_IDSHIFT) |
             (VSL_VERSION_3 << VSL_VERSHIFT) |
             len;
        return (VSL_END(p, len));
}

/*--------------------------------------------------------------------
 * Space available in a VSL buffer when accounting for overhead
 */

static unsigned
vsl_space(const struct vsl_log *vsl)
{
        ptrdiff_t mlen;

        mlen = vsl->wle - vsl->wlp;
        assert(mlen >= 0);
        if (mlen < VSL_OVERHEAD + 1)
                return (0);
        mlen -= VSL_OVERHEAD;
        mlen *= sizeof *vsl->wlp;
        if (mlen > cache_param->vsl_reclen)
                mlen = cache_param->vsl_reclen;
        return (mlen);
}

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

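/*
 * The shared log is divided into VSL_SEGMENTS segments.  vsl_head->offset[]
 * records where each segment starts and vsl_head->segment_n only ever
 * increases (modulo overflow), so readers can tell how far the writer has
 * advanced and whether they have been overrun.  (Sketch of the intent; see
 * vsl_priv.h and the VSL reader code for the authoritative contract.)
 */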
static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                PTOK(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;
        VSC_C_main->shm_bytes +=
            VSL_BYTES(VSL_OVERHEAD + VSL_WORDS((uint64_t)len));

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        PTOK(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, vxid_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + VSL_OVERHEAD, b, len);

        /*
         * The vxid needs to be written before the barrier to
         * ensure it is valid when vsl_hdr() marks the record
         * ready by writing p[0]
         */
        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume,
 * NB: since it significantly adds to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                n = vmin(n, mlen - 1);
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }
}

void
VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        if (vsl_tag_is_masked(tag))
                return;

        n = strands_cat(buf, mlen, s);

        vslr(tag, vxid, buf, n);
}

void
VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}

/*--------------------------------------------------------------------*/

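/*
 * Flush the task-local buffer into shared memory: the accumulated records
 * are copied out as a single SLT__Batch record, so only one reservation of
 * the global log (and one grab of vsl_mtx) is needed per flush.
 */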
void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + VSL_OVERHEAD, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << VSL_IDSHIFT));
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}

/*--------------------------------------------------------------------
 * Buffered VSLs
 */

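/*
 * The VSLb*() family appends records to the per-task vsl_log buffer set up
 * by VSL_Setup(); nothing reaches shared memory until VSL_Flush() runs or
 * the buffer fills up.  Illustrative call from worker code, assuming a
 * request context:
 *
 *      VSLb(req->vsl, SLT_Debug, "ttl=%.3f", ttl);
 */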
static char *
vslb_get(struct vsl_log *vsl, enum VSL_tag_e tag, unsigned *length)
{
        unsigned mlen = cache_param->vsl_reclen;
        char *retval;

        vsl_sanity(vsl);
        if (*length < mlen)
                mlen = *length;

        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                VSL_Flush(vsl, 1);

        retval = VSL_DATA(vsl->wlp);

        /* If it still doesn't fit, truncate */
        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                mlen = vsl_space(vsl);

        vsl->wlp = vsl_hdr(tag, vsl->wlp, mlen, vsl->wid);
        vsl->wlr++;
        *length = mlen;
        return (retval);
}

static void
vslb_simple(struct vsl_log *vsl, enum VSL_tag_e tag,
    unsigned length, const char *str)
{
        char *p;

        if (length == 0)
                length = strlen(str);
        length += 1; // NUL
        p = vslb_get(vsl, tag, &length);
        memcpy(p, str, length - 1);
        p[length - 1] = '\0';

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{

        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;

        vslb_simple(vsl, tag, Tlen(t), t.b);
}

/*--------------------------------------------------------------------
 * VSL-buffered-strands
 */
void
VSLbs(struct vsl_log *vsl, enum VSL_tag_e tag, const struct strands *s)
{
        unsigned l;
        char *p;

        if (vsl_tag_is_masked(tag))
                return;

        l = strands_len(s) + 1;
        p = vslb_get(vsl, tag, &l);

        (void)strands_cat(p, l, s);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p, *p1;
        unsigned n = 0, mlen;
        va_list ap2;

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        if (strchr(fmt, '%') == NULL) {
                vslb_simple(vsl, tag, 0, fmt);
                return;
        }

        /*
         * If the format is trivial, deal with it directly
         */
        if (!strcmp(fmt, "%s")) {
                p1 = va_arg(ap, char *);
                vslb_simple(vsl, tag, 0, p1);
                return;
        }

        vsl_sanity(vsl);

        mlen = vsl_space(vsl);

        // First attempt, only if any space at all
        if (mlen > 0) {
                p = VSL_DATA(vsl->wlp);
                va_copy(ap2, ap);
                n = vsnprintf(p, mlen, fmt, ap2);
                va_end(ap2);
        }

        // Second attempt, if a flush might help
        if (mlen == 0 || (n + 1 > mlen && n + 1 <= cache_param->vsl_reclen)) {
                VSL_Flush(vsl, 1);
                mlen = vsl_space(vsl);
                p = VSL_DATA(vsl->wlp);
                n = vsnprintf(p, mlen, fmt, ap);
        }
        if (n + 1 < mlen)
                mlen = n + 1;
        (void)vslb_get(vsl, tag, &mlen);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}

#define Tf6 "%ju.%06ju"
#define Ta6(t) (uintmax_t)floor((t)), (uintmax_t)floor((t) * 1e6) % 1000000U

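/*
 * Tf6/Ta6 format a vtim_real as seconds with six decimals, so a Timestamp
 * record reads "<event>: <absolute> <since-first> <since-prev>", e.g.
 * (illustrative values) "Resp: 1612345678.123456 0.001234 0.000042".
 */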
void
VSLb_ts(struct vsl_log *vsl, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now)
{

        /*
         * XXX: Make an option to turn off some unnecessary timestamp
         * logging. This must be done carefully because some functions
         * (e.g. V1L_Open) take the last timestamp as their initial
         * value for timeout calculation.
         */
        vsl_sanity(vsl);
        AN(event);
        AN(pprev);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: " Tf6 " " Tf6 " " Tf6,
            event, Ta6(now), Ta6(now - first), Ta6(now - *pprev));
        *pprev = now;
}

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        unsigned mlen;
        char *p;

        vsl_sanity(vsl);
        AN(ptr);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        len = vmin_t(ssize_t, len, mlen);

        assert(vsl->wlp <= vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, len) > vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, len) <= vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, ptr, len);
        vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
        assert(vsl->wlp <= vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * Setup a VSL buffer, allocate space if none provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = NO_VXID;
        vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, vxid_t vxid)
{
        vxid_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %ju %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %ju %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        assert(!IS_NO_VXID(vsl->wid));
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = NO_VXID;
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
        PTOK(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
        PTOK(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

void
VSM_Init(void)
{
        unsigned u;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        PTOK(pthread_mutex_init(&vsl_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsc_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsm_mtx, &mtxattr_errorcheck));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;
        vsmw_lock = vsm_vsmw_lock;
        vsmw_unlock = vsm_vsmw_unlock;

        heritage.proc_vsmw = VSMW_New(heritage.vsm_fd, 0640, "_.index");
        AN(heritage.proc_vsmw);

        VSC_C_main = VSC_main_New(NULL, NULL, "");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Start segment_n so that it overflows on the first log wrap,
           making any problems readers have with that event visible early */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (u = 1; u < VSL_SEGMENTS; u++)
                vsl_head->offset[u] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}