varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* ------------------------------------------------------------
 * strands helpers - move elsewhere?
 */

static unsigned
strands_len(const struct strands *s)
{
        unsigned r = 0;
        int i;

        CHECK_OBJ_NOTNULL(s, STRANDS_MAGIC);
        for (i = 0; i < s->n; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                r += strlen(s->p[i]);
        }

        return (r);
}
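
/*
 * Note: strands_len() counts only the payload bytes of the non-empty
 * strands; the NUL terminator is not included.  Callers such as VSLbs()
 * therefore add 1 when sizing a buffer for strands_cat().
 */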

/*
 * like VRT_Strands(), but truncates instead of failing when the buffer
 * runs out
 *
 * returns the number of bytes written, including the NUL
 */
static unsigned
strands_cat(char *buf, unsigned bufl, const struct strands *s)
{
        unsigned l = 0, ll;
        int i;

        /* NUL-terminated */
        assert(bufl > 0);
        bufl--;
        CHECK_OBJ_NOTNULL(s, STRANDS_MAGIC);

        for (i = 0; i < s->n && bufl > 0; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                ll = vmin_t(unsigned, strlen(s->p[i]), bufl);
                memcpy(buf, s->p[i], ll);
                l += ll;
                buf += ll;
                bufl -= ll;
        }
        *buf = '\0';    /* NUL-terminated */
        return (l + 1);
}
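
/*
 * Worked example: with bufl == 4 and strands {"foo", "bar"}, only "foo"
 * fits; buf ends up holding "foo\0" and the return value is 4 (three
 * payload bytes plus the NUL).
 */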

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;

static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
        assert(vsl->wlb <= vsl->wlp);
        assert(vsl->wlp <= vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by the vsl_mask parameter bitmap
 */

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}

int
VSL_tag_is_masked(enum VSL_tag_e tag)
{
        return (vsl_tag_is_masked(tag));
}
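
/*
 * Worked example: tag value 11 selects byte 11 >> 3 == 1 of vsl_mask
 * and bit 0x80 >> (11 & 7) == 0x10 within it; a set bit means the tag
 * is masked, i.e. not logged.
 */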

/*--------------------------------------------------------------------
 * Lay down the header fields, and return a pointer to the next record
 */

static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, vxid_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        p[0] = (((unsigned)tag & VSL_IDMASK) << VSL_IDSHIFT) |
             (VSL_VERSION_3 << VSL_VERSHIFT) |
             len;
        return (VSL_END(p, len));
}
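
/*
 * Resulting record layout, one 32-bit word per slot:
 *
 *   p[0]   tag, VSL version and payload length (a valid p[0] is what
 *          publishes the record to readers, so writers fill it in last)
 *   p[1]   low 32 bits of the vxid
 *   p[2]   high 32 bits of the vxid
 *   p[3]-  payload, len bytes rounded up to whole words
 *
 * VSL_END() returns the word following the payload, i.e. where the
 * next record starts.
 */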

/*--------------------------------------------------------------------
 * Space available in a VSL buffer when accounting for overhead
 */

static unsigned
vsl_space(const struct vsl_log *vsl)
{
        ptrdiff_t mlen;

        mlen = vsl->wle - vsl->wlp;
        assert(mlen >= 0);
        if (mlen < VSL_OVERHEAD + 1)
                return (0);
        mlen -= VSL_OVERHEAD;
        mlen *= sizeof *vsl->wlp;
        if (mlen > cache_param->vsl_reclen)
                mlen = cache_param->vsl_reclen;
        return (mlen);
}
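
/*
 * Note: vsl_space() converts the remaining words (minus VSL_OVERHEAD
 * for the record header) into bytes, then caps the result at
 * vsl_reclen, the maximum payload a single record may carry.
 */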

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}
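
/*
 * Note: the wrap rounds vsl_segment_n up to the next multiple of
 * VSL_SEGMENTS, so segment numbers keep increasing monotonically
 * across wraps; e.g. with VSL_SEGMENTS == 8, wrapping at
 * vsl_segment_n == 21 advances it to 24.  Readers can compare segment
 * numbers to detect that they have been overrun.
 */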

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                PTOK(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;
        VSC_C_main->shm_bytes +=
            VSL_BYTES(VSL_OVERHEAD + VSL_WORDS((uint64_t)len));

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        PTOK(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}
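
/*
 * Note: the trylock/lock dance above exists only to count contention:
 * a failed trylock means another thread holds vsl_mtx, which is
 * recorded in the shm_cont counter before blocking on the regular
 * lock.  The locking semantics are the same as a plain
 * pthread_mutex_lock().
 */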

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, vxid_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + VSL_OVERHEAD, b, len);

        /*
         * the vxid needs to be written before the barrier to
         * ensure it is valid when vsl_hdr() marks the record
         * ready by writing p[0]
         */
        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume,
 * NB: since it significantly adds to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                n = vmin(n, mlen - 1);
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }
}

void
VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        if (vsl_tag_is_masked(tag))
                return;

        n = strands_cat(buf, mlen, s);

        vslr(tag, vxid, buf, n);
}

void
VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}

/*--------------------------------------------------------------------*/

void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + VSL_OVERHEAD, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << VSL_IDSHIFT));
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}
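
/*
 * Note: a flush publishes the whole local buffer as a single
 * SLT__Batch record whose payload is the vsl->wlr individual records;
 * for a batch, p[1] carries the payload length in bytes rather than a
 * vxid.  The VWMB() ensures the payload is visible before p[0] marks
 * the batch record valid.
 */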

/*--------------------------------------------------------------------
 * Buffered VSLs
 */

static char *
vslb_get(struct vsl_log *vsl, enum VSL_tag_e tag, unsigned *length)
{
        unsigned mlen = cache_param->vsl_reclen;
        char *retval;

        vsl_sanity(vsl);
        if (*length < mlen)
                mlen = *length;

        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                VSL_Flush(vsl, 1);

        retval = VSL_DATA(vsl->wlp);

        /* If it still doesn't fit, truncate */
        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                mlen = vsl_space(vsl);

        vsl->wlp = vsl_hdr(tag, vsl->wlp, mlen, vsl->wid);
        vsl->wlr++;
        *length = mlen;
        return (retval);
}

static void
vslb_simple(struct vsl_log *vsl, enum VSL_tag_e tag,
    unsigned length, const char *str)
{
        char *p;

        if (length == 0)
                length = strlen(str);
        length += 1; // NUL
        p = vslb_get(vsl, tag, &length);
        memcpy(p, str, length - 1);
        p[length - 1] = '\0';

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{

        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;

        vslb_simple(vsl, tag, Tlen(t), t.b);
}

/*--------------------------------------------------------------------
 * VSL-buffered-strands
 */
void
VSLbs(struct vsl_log *vsl, enum VSL_tag_e tag, const struct strands *s)
{
        unsigned l;
        char *p;

        if (vsl_tag_is_masked(tag))
                return;

        l = strands_len(s) + 1;
        p = vslb_get(vsl, tag, &l);

        (void)strands_cat(p, l, s);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p, *p1;
        unsigned n = 0, mlen;
        va_list ap2;

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        if (strchr(fmt, '%') == NULL) {
                vslb_simple(vsl, tag, 0, fmt);
                return;
        }

        /*
         * If the format is trivial, deal with it directly
         */
        if (!strcmp(fmt, "%s")) {
                p1 = va_arg(ap, char *);
                vslb_simple(vsl, tag, 0, p1);
                return;
        }

        vsl_sanity(vsl);

        mlen = vsl_space(vsl);

        // First attempt, only if any space at all
        if (mlen > 0) {
                p = VSL_DATA(vsl->wlp);
                va_copy(ap2, ap);
                n = vsnprintf(p, mlen, fmt, ap2);
                va_end(ap2);
        }

        // Second attempt, if a flush might help
        if (mlen == 0 || (n + 1 > mlen && n + 1 <= cache_param->vsl_reclen)) {
                VSL_Flush(vsl, 1);
                mlen = vsl_space(vsl);
                p = VSL_DATA(vsl->wlp);
                n = vsnprintf(p, mlen, fmt, ap);
        }
        if (n + 1 < mlen)
                mlen = n + 1;
        (void)vslb_get(vsl, tag, &mlen);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}
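
/*
 * Note: VSLbv() formats straight into the VSL buffer and only then
 * lays down the header: the first vsnprintf() writes at
 * VSL_DATA(vsl->wlp), and if the result was truncated but would fit in
 * an empty buffer, the record is reformatted after a flush.  The final
 * vslb_get() call does not copy anything; it merely writes the header
 * in front of the bytes already in place.
 */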

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}

#define Tf6 "%ju.%06ju"
#define Ta6(t) (uintmax_t)floor((t)), (uintmax_t)floor((t) * 1e6) % 1000000U
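
/*
 * Tf6/Ta6 print a vtim_real with microsecond resolution, e.g.
 * Ta6(1.5) expands to the argument pair (1, 500000), which Tf6
 * formats as "1.500000".
 */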

void
VSLb_ts(struct vsl_log *vsl, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now)
{

        /*
         * XXX: Make an option to turn off some unnecessary timestamp
         * logging. This must be done carefully because some functions
         * (e.g. V1L_Open) take the last timestamp as their initial
         * value for timeout calculation.
         */
        vsl_sanity(vsl);
        AN(event);
        AN(pprev);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: " Tf6 " " Tf6 " " Tf6,
            event, Ta6(now), Ta6(now - first), Ta6(now - *pprev));
        *pprev = now;
}

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        unsigned mlen;
        char *p;

        vsl_sanity(vsl);
        AN(ptr);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        len = vmin_t(ssize_t, len, mlen);

        assert(vsl->wlp <= vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, len) > vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, len) <= vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, ptr, len);
        vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
        assert(vsl->wlp <= vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}
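
/*
 * Note: unlike the string variants, VSLb_bin() copies the payload
 * verbatim and does not append a NUL terminator; the record length in
 * the header is the only way to know where the payload ends.
 */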

/*--------------------------------------------------------------------
 * Setup a VSL buffer, allocate space if none provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = NO_VXID;
        vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, vxid_t vxid)
{
        vxid_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %ju %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %ju %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        assert(!IS_NO_VXID(vsl->wid));
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = NO_VXID;
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
        PTOK(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
        PTOK(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

void
VSM_Init(void)
{
        unsigned u;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        PTOK(pthread_mutex_init(&vsl_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsc_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsm_mtx, &mtxattr_errorcheck));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;
        vsmw_lock = vsm_vsmw_lock;
        vsmw_unlock = vsm_vsmw_unlock;

        heritage.proc_vsmw = VSMW_New(heritage.vsm_fd, 0640, "_.index");
        AN(heritage.proc_vsmw);

        VSC_C_main = VSC_main_New(NULL, NULL, "");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Make segment_n always overflow on first log wrap to make any
           problems with regard to readers on that event visible */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (u = 1; u < VSL_SEGMENTS; u++)
                vsl_head->offset[u] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}
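
/*
 * Note: starting vsl_segment_n at UINT_MAX - (VSL_SEGMENTS - 1) means
 * the unsigned counter wraps around on the very first buffer wrap;
 * e.g. with VSL_SEGMENTS == 8 it starts at 0xfffffff8 and reaches 0 at
 * the first vsl_wrap().  Per the comment above, this is deliberate:
 * any reader problem triggered by segment-number overflow shows up
 * immediately rather than only after 2^32 segments.
 */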