varnish-cache/bin/varnishd/common/common_vsmw.c
/*-
 * Copyright (c) 2010-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * VSM stuff common to manager and child.
 *
 */

#include "config.h"

#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>

#include "vdef.h"
#include "vas.h"
#include "vsb.h"
#include "miniobj.h"
#include "vqueue.h"

#include "vfil.h"
#include "vrnd.h"

#include "heritage.h"
#include "vsmw.h"

#ifndef MAP_HASSEMAPHORE
#  define MAP_HASSEMAPHORE 0 /* XXX Linux */
#endif

#ifndef MAP_NOSYNC
#  define MAP_NOSYNC 0 /* XXX Linux */
#endif

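/*
 * Lock plumbing: vsmw_lock/vsmw_unlock default to no-ops; a process
 * that needs real serialization (presumably the manager) points them
 * at actual lock primitives.  vsmw_haslock only feeds the
 * vsmw_assert_lock() sanity checks below.
 */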
static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}

static int vsmw_haslock;
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;

#define vsmw_assert_lock()	AN(vsmw_haslock)

#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)

#define vsmw_do_lock_(f, l)				\
	do {						\
		vsmw_lock();				\
		AZ(vsmw_haslock);			\
		vsmw_haslock = 1;			\
	} while(0)

#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
#define vsmw_do_unlock_(f, l)				\
	do {						\
		AN(vsmw_haslock);			\
		vsmw_haslock = 0;			\
		vsmw_unlock();				\
	} while(0)

/*--------------------------------------------------------------------*/

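/*
 * One vsmw manages one VSM directory: a "cluster" is a single
 * mmap()'ed file in that directory, and a "vsmwseg" is a named
 * allocation carved out of a cluster.  Every segment is advertised
 * to readers through the index file (vsmw->idx).
 */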
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;
	struct vsmwseg			*cseg;
	char				*fn;
	size_t				len;
	void				*ptr;
	size_t				next;
	int				refs;
	int				named;
};

struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;
	struct vsmw_cluster		*cluster;

	char				*category;
	size_t				off;
	size_t				len;
	char				*id;
	void				*ptr;
};

struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;
	int				mode;
	char				*idx;
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;
	pid_t				pid;
	time_t				birth;
	uint64_t			nsegs;
	uint64_t			nsubs;
};

/* Allocations in clusters never start at offset zero */
#define VSM_CLUSTER_OFFSET 16

/*--------------------------------------------------------------------*/

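/*
 * The index file starts with a "# <pid> <birth>" header line; each
 * subsequent line adds ('+') or retires ('-') one segment as
 * "<act> <cluster-file> <offset> <length> <category> <id>".
 */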
static void
vsmw_idx_head(const struct vsmw *vsmw, int fd)
{
	char buf[64];

	bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
	// XXX handle ENOSPC? #2764
	assert(write(fd, buf, strlen(buf)) == strlen(buf));
}

#define ASSERT_SEG_STR(x) do {			\
		AN(x);				\
		AZ(strchr(x, '\n'));		\
	} while (0);

static void
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	AN(seg->cluster);
	ASSERT_SEG_STR(seg->category);
	ASSERT_SEG_STR(seg->id);

	VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
	    act,
	    seg->cluster->fn,
	    seg->off,
	    seg->len,
	    seg->category,
	    seg->id);
}

/*--------------------------------------------------------------------*/

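/*
 * Leave a not-yet-existing file name "_.<pfx>.<16 random hex digits>"
 * in vsmw->vsb; the openat() probe loop retries until the name is free.
 */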
static void
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
{
	int fd;
	uint64_t rn;

	AN(pfx);
	vsmw_assert_lock();
	while (1) {
		VSB_clear(vsmw->vsb);
		VSB_printf(vsmw->vsb, "_.%s", pfx);
		AZ(VRND_RandomCrypto(&rn, sizeof rn));
		VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
		AZ(VSB_finish(vsmw->vsb));
		fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
		if (fd < 0 && errno == ENOENT)
			return;
		if (fd >= 0)
			closefd(&fd);
	}
}

/*--------------------------------------------------------------------*/

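/* Append a single '+' or '-' record for one segment to the index file. */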
static void
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
{
	int fd;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
	assert(fd >= 0);
	VSB_clear(vsmw->vsb);
	vsmw_fmt_index(vsmw, seg, act);
	AZ(VSB_finish(vsmw->vsb));
	XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
	closefd(&fd);
}

/*--------------------------------------------------------------------*/

static void
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg)
{

	vsmw_assert_lock();
	VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list);
	vsmw_append_record(vsmw, seg, '+');
	vsmw->nsegs++;
}

/*--------------------------------------------------------------------*/

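/*
 * Retire a segment.  Normally this just appends a '-' record, but once
 * there are at least 10 retired records and they amount to at least
 * half the number of live segments, the whole index is rewritten to a
 * fresh file and renameat()'ed into place to keep it from growing
 * without bound.
 */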
static void
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
{
	char *t = NULL;
	int fd;
	struct vsmwseg *s2;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);

	VTAILQ_REMOVE(&vsmw->segs, seg, list);

	vsmw->nsegs--;
	if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
		vsmw_append_record(vsmw, seg, '-');
		vsmw->nsubs++;
	} else {
		vsmw_mkent(vsmw, vsmw->idx);
		REPLACE(t, VSB_data(vsmw->vsb));
		fd = openat(vsmw->vdirfd,
		    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
		assert(fd >= 0);
		vsmw_idx_head(vsmw, fd);
		VSB_clear(vsmw->vsb);
		VTAILQ_FOREACH(s2, &vsmw->segs, list)
			vsmw_fmt_index(vsmw, s2, '+');
		AZ(VSB_finish(vsmw->vsb));
		XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
		closefd(&fd);
		AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
		REPLACE(t, NULL);
		vsmw->nsubs = 0;
	}
	REPLACE(seg->category, NULL);
	REPLACE(seg->id, NULL);
	FREE_OBJ(seg);
}

/*--------------------------------------------------------------------*/

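/* Report RLIMIT_MEMLOCK so the mlock() warning below is actionable. */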
#ifdef RLIMIT_MEMLOCK
static void
printlim(const char *name, rlim_t lim)
{

	fprintf(stderr, "Info: %s: ", name);
	if (lim == RLIM_INFINITY)
		fprintf(stderr, "unlimited\n");
	else
		fprintf(stderr, "%ju bytes\n", (uintmax_t)lim);
}

static void
printmemlock(void) {
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
static void printmemlock(void) {}
#endif

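/*
 * Create a new cluster file: pick a unique name, round the size up to
 * whole pages, preallocate it on disk, mmap() it shared, and try to
 * mlock() it (failure is only a warning, reported once).
 */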
static struct vsmw_cluster *
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	static int warn = 0;
	int fd;
	size_t ps;

	vsmw_assert_lock();
	ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC);
	AN(vc);

	vsmw_mkent(vsmw, pfx);
	REPLACE(vc->fn, VSB_data(vsmw->vsb));

	VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list);

	ps = getpagesize();
	len = RUP2(len, ps);
	vc->len = len;

	fd = openat(vsmw->vdirfd, vc->fn,
	    O_RDWR | O_CREAT | O_EXCL, vsmw->mode);
	assert(fd >= 0);

	AZ(VFIL_allocate(fd, (off_t)len, 1));

	vc->ptr = (void *)mmap(NULL, len,
	    PROT_READ|PROT_WRITE,
	    MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
	    fd, 0);

	closefd(&fd);
	assert(vc->ptr != MAP_FAILED);
	if (mlock(vc->ptr, len) && warn++ == 0) {
		fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n",
		    VAS_errtxt(errno), errno);
		printmemlock();
	}

	return (vc);
}

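/*
 * Public cluster constructor: reserves VSM_CLUSTER_OFFSET bytes at the
 * start and publishes the whole cluster as an anonymous segment (empty
 * category and id) in the index, presumably so readers can map the
 * entire file up front.
 */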
struct vsmw_cluster *
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	struct vsmwseg *seg;

	vsmw_do_lock();
	vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
	AN(vc);
	vc->next += VSM_CLUSTER_OFFSET;

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	vc->cseg = seg;
	seg->len = vc->len;
	seg->cluster = vc;
	REPLACE(seg->category, "");
	REPLACE(seg->id, "");
	vc->refs++;
	vc->named = 1;
	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (vc);
}

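/* Unmap the cluster, retire its covering segment (if named) and unlink the file. */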
static void
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);

	AZ(vc->refs);

	AZ(munmap(vc->ptr, vc->len));
	if (vc->named)
		vsmw_delseg(vsmw, vc->cseg);
	vc->cseg = 0;

	VTAILQ_REMOVE(&vsmw->clusters, vc, list);
	if (unlinkat(vsmw->vdirfd, vc->fn, 0))
		assert (errno == ENOENT);
	REPLACE(vc->fn, NULL);
	FREE_OBJ(vc);
}

void
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
{
	struct vsmw_cluster *vc;

	TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);

	vsmw_do_lock();
	if (--vc->refs == 0)
		vsmw_DestroyCluster_locked(vsmw, vc);
	vsmw_do_unlock();
}

/*--------------------------------------------------------------------*/

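/*
 * Allocate a named segment.  The id is "<prefix>.<fmt...>" (or just the
 * expansion of fmt when prefix is NULL); with a NULL cluster a private
 * cluster sized for the segment is created.
 */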
void *
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t payload, const char *prefix,
    const char *fmt, va_list va)
{
	struct vsmwseg *seg;
	ssize_t l;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	REPLACE(seg->category, category);
	seg->len = PRNDUP(payload);

	VSB_clear(vsmw->vsb);
	if (prefix != NULL) {
		assert(prefix[0] != '\0');
		VSB_cat(vsmw->vsb, prefix);
		if (fmt[0] != '\0')
			VSB_cat(vsmw->vsb, ".");
	}
	l = VSB_len(vsmw->vsb);
	assert(l >= 0);
	VSB_vprintf(vsmw->vsb, fmt, va);
	AZ(VSB_finish(vsmw->vsb));
	assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));

	REPLACE(seg->id, VSB_data(vsmw->vsb));

	if (vc == NULL)
		vc = vsmw_newcluster(vsmw, seg->len, category);
	AN(vc);
	vc->refs++;

	seg->cluster = vc;
	seg->off = vc->next;
	vc->next += seg->len;
	assert(vc->next <= vc->len);
	seg->ptr = seg->off + (char*)vc->ptr;

	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (seg->ptr);
}

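/* Varargs wrapper around VSMW_Allocv() without a prefix. */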
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	va_list ap;
	void *p;

	va_start(ap, fmt);
	p = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, ap);
	va_end(ap);
	return (p);
}

/*--------------------------------------------------------------------*/

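/*
 * Free a segment by payload pointer: the segment is found by a linear
 * search of the segment list, retired from the index, and its cluster
 * is destroyed when the last reference goes away.
 */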
void
VSMW_Free(struct vsmw *vsmw, void **pp)
{
	struct vsmwseg *seg;
	struct vsmw_cluster *cp;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	AN(pp);
	VTAILQ_FOREACH(seg, &vsmw->segs, list)
		if (seg->ptr == *pp)
			break;
	AN(seg);
	*pp = NULL;

	cp = seg->cluster;
	CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
	assert(cp->refs > 0);

	vsmw_delseg(vsmw, seg);

	if (!--cp->refs)
		vsmw_DestroyCluster_locked(vsmw, cp);
	vsmw_do_unlock();
}

/*--------------------------------------------------------------------*/

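/*
 * Create a writer for the VSM directory open at vdirfd: any stale index
 * file is unlinked and a fresh one is started with the "# <pid> <birth>"
 * header.
 */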
struct vsmw *
VSMW_New(int vdirfd, int mode, const char *idxname)
{
	struct vsmw *vsmw;
	int fd;

	assert(vdirfd > 0);
	assert(mode > 0);
	AN(idxname);

	vsmw_do_lock();
	ALLOC_OBJ(vsmw, VSMW_MAGIC);
	AN(vsmw);

	VTAILQ_INIT(&vsmw->segs);
	VTAILQ_INIT(&vsmw->clusters);
	vsmw->vsb = VSB_new_auto();
	AN(vsmw->vsb);
	REPLACE(vsmw->idx, idxname);
	vsmw->mode = mode;
	vsmw->vdirfd = vdirfd;
	vsmw->pid = getpid();
	vsmw->birth = time(NULL);

	if (unlinkat(vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	fd = openat(vdirfd,
	    vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
	assert(fd >= 0);
	vsmw_idx_head(vsmw, fd);
	closefd(&fd);

	vsmw_do_unlock();
	return (vsmw);
}

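/* Tear down the writer: retire all segments, unlink the index file and close vdirfd. */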
void
VSMW_Destroy(struct vsmw **pp)
{
	struct vsmw *vsmw;
	struct vsmwseg *seg, *s2;

	vsmw_do_lock();
	TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
	VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
		vsmw_delseg(vsmw, seg);
	if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	REPLACE(vsmw->idx, NULL);
	VSB_destroy(&vsmw->vsb);
	closefd(&vsmw->vdirfd);
	FREE_OBJ(vsmw);
	vsmw_do_unlock();
}