varnish-cache/bin/varnishd/common/common_vsmw.c
0
/*-
1
 * Copyright (c) 2010-2011 Varnish Software AS
2
 * All rights reserved.
3
 *
4
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
5
 *
6
 * SPDX-License-Identifier: BSD-2-Clause
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * VSM stuff common to manager and child.
30
 *
31
 */
32
33
#include "config.h"
34
35
#include <fcntl.h>
36
#include <stdarg.h>
37
#include <stdio.h>
38
#include <stdint.h>
39
#include <stdlib.h>
40
#include <string.h>
41
#include <time.h>
42
#include <unistd.h>
43
#include <sys/mman.h>
44
#include <sys/resource.h>
45
#include <sys/stat.h>
46
47
#include "vdef.h"
48
#include "vas.h"
49
#include "vsb.h"
50
#include "miniobj.h"
51
#include "vqueue.h"
52
53
#include "vfil.h"
54
#include "vrnd.h"
55
56
#include "heritage.h"
57
#include "vsmw.h"
58
59
/* MAP_HASSEMAPHORE and MAP_NOSYNC are BSD mmap(2) flags; define them
 * away as no-ops on platforms (e.g. Linux) that lack them. */
#ifndef MAP_HASSEMAPHORE
#  define MAP_HASSEMAPHORE 0 /* XXX Linux */
#endif

#ifndef MAP_NOSYNC
#  define MAP_NOSYNC 0 /* XXX Linux */
#endif
66
67
/* Default no-op lock callback.  vsmw_lock/vsmw_unlock point here until
 * a caller installs real locking — presumably only needed where VSMW
 * is used from multiple threads; confirm against callers. */
static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}
71
72
static int vsmw_haslock;	/* nonzero while the VSMW lock is held */
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;

/* Assert the caller holds the VSMW lock */
#define vsmw_assert_lock()	AN(vsmw_haslock)

#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)

/* (f, l) capture the caller's function/line; currently unused but kept
 * so a debugging implementation can report lock sites. */
#define vsmw_do_lock_(f, l)				\
	do {					\
		vsmw_lock();			\
		AZ(vsmw_haslock);		\
		vsmw_haslock = 1;		\
	} while(0)

#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
#define vsmw_do_unlock_(f, l)				\
	do {					\
		AN(vsmw_haslock);		\
		vsmw_haslock = 0;		\
		vsmw_unlock();			\
	} while(0)
94
95
/*--------------------------------------------------------------------*/
96
97
/* A cluster: one mmap'ed backing file from which segments are carved. */
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;
	struct vsmwseg			*cseg;	/* whole-cluster segment (named clusters) */
	char				*fn;	/* backing file name, relative to vdirfd */
	size_t				len;	/* mapped length (page-rounded) */
	void				*ptr;	/* mmap base address */
	size_t				next;	/* next free offset for allocation */
	int				refs;	/* segments referencing this cluster */
	int				named;	/* created via VSMW_NewCluster() */
};
110
111
/* One allocation inside a cluster, identified by category and id. */
struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;
	struct vsmw_cluster		*cluster;	/* owning cluster */

	char				*category;	/* e.g. VSM class */
	size_t				off;		/* offset inside cluster */
	size_t				len;		/* rounded-up payload length */
	char				*id;		/* segment identifier string */
	void				*ptr;		/* payload address (cluster->ptr + off) */
};
123
124
/* Writer state for one VSM directory and its index file. */
struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;		/* fd of the VSM directory */
	int				mode;		/* file creation mode */
	char				*idx;		/* index file name */
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;		/* scratch string buffer */
	pid_t				pid;		/* recorded in index header */
	time_t				birth;		/* recorded in index header */
	uint64_t			nsegs;		/* live segments */
	uint64_t			nsubs;		/* '-' records since last compaction */
};
138
139
/* Allocations in clusters never start at offset zero */
140
#define VSM_CLUSTER_OFFSET 16
141
142
/*--------------------------------------------------------------------*/
143
144
static void
145 3859
vsmw_idx_head(const struct vsmw *vsmw, int fd)
146
{
147
        char buf[64];
148
149 3859
        bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
150
        // XXX handle ENOSPC? #2764
151 3859
        assert(write(fd, buf, strlen(buf)) == strlen(buf));
152 3859
}
153
154
/*
 * Sanity-check an index string field: non-NULL and newline-free,
 * since '\n' terminates records in the index file.
 *
 * Note: no trailing ';' after "while (0)" — the stray semicolon in the
 * previous version defeated the do/while(0) idiom and would break the
 * macro when used in an if/else statement.
 */
#define ASSERT_SEG_STR(x) do {			\
		AN(x);				\
		AZ(strchr(x, '\n'));		\
	} while (0)
158
159
static void
160 113922
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
161
{
162
163 113922
        vsmw_assert_lock();
164 113922
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
165 113922
        CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
166 113922
        AN(seg->cluster);
167 113922
        ASSERT_SEG_STR(seg->category);
168 113922
        ASSERT_SEG_STR(seg->id);
169
170 227844
        VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
171 113922
            act,
172 113922
            seg->cluster->fn,
173 113922
            seg->off,
174 113922
            seg->len,
175 113922
            seg->category,
176 113922
            seg->id);
177 113922
}
178
179
/*--------------------------------------------------------------------*/
180
181
static void
182 97280
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
183
{
184
        int fd;
185
        uint64_t rn;
186
187 97280
        AN(pfx);
188 97280
        vsmw_assert_lock();
189 97280
        while (1) {
190 97280
                VSB_clear(vsmw->vsb);
191 97280
                VSB_printf(vsmw->vsb, "_.%s", pfx);
192 97280
                AZ(VRND_RandomCrypto(&rn, sizeof rn));
193 97280
                VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
194 97280
                AZ(VSB_finish(vsmw->vsb));
195 97280
                fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
196 97280
                if (fd < 0 && errno == ENOENT)
197 97280
                        return;
198 0
                if (fd >= 0)
199 0
                        closefd(&fd);
200
        }
201
}
202
203
/*--------------------------------------------------------------------*/
204
205
static void
206 113798
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
207
{
208
        int fd;
209
210 113798
        vsmw_assert_lock();
211 113798
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
212 113798
        CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
213 113798
        fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
214 113798
        assert(fd >= 0);
215 113798
        VSB_clear(vsmw->vsb);
216 113798
        vsmw_fmt_index(vsmw, seg, act);
217 113798
        AZ(VSB_finish(vsmw->vsb));
218 113798
        XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
219 113798
        closefd(&fd);
220 113798
}
221
222
/*--------------------------------------------------------------------*/
223
224
static void
225 99866
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg)
226
{
227
228 99866
        vsmw_assert_lock();
229 99866
        VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list);
230 99866
        vsmw_append_record(vsmw, seg, '+');
231 99866
        vsmw->nsegs++;
232 99866
}
233
234
/*--------------------------------------------------------------------*/
235
236
/*
 * Delete a segment: unlink it from the list, update the index file and
 * free it.
 *
 * Normally the deletion is logged by appending a '-' record.  Once at
 * least 10 deletions have accumulated AND they amount to at least half
 * the number of live segments, the index is instead rewritten from
 * scratch with only the live '+' records, so the file cannot grow
 * without bound.
 */
static void
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
{
	char *t = NULL;
	int fd;
	struct vsmwseg *s2;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);

	VTAILQ_REMOVE(&vsmw->segs, seg, list);

	vsmw->nsegs--;
	if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
		/* Cheap path: append a '-' record. */
		vsmw_append_record(vsmw, seg, '-');
		vsmw->nsubs++;
	} else {
		/* Compact: build a fresh index under a temporary random
		 * name (vsmw_mkent leaves it in vsmw->vsb)... */
		vsmw_mkent(vsmw, vsmw->idx);
		REPLACE(t, VSB_data(vsmw->vsb));
		fd = openat(vsmw->vdirfd,
		    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
		assert(fd >= 0);
		vsmw_idx_head(vsmw, fd);
		VSB_clear(vsmw->vsb);
		VTAILQ_FOREACH(s2, &vsmw->segs, list)
			vsmw_fmt_index(vsmw, s2, '+');
		AZ(VSB_finish(vsmw->vsb));
		XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
		closefd(&fd);
		/* ...then atomically replace the live index file. */
		AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
		REPLACE(t, NULL);
		vsmw->nsubs = 0;
	}
	/* REPLACE(..., NULL) frees the strings; then free the segment. */
	REPLACE(seg->category, NULL);
	REPLACE(seg->id, NULL);
	FREE_OBJ(seg);
}
274
275
/*--------------------------------------------------------------------*/
276
277
#ifdef RLIMIT_MEMLOCK
/* Print one locked-memory limit in human-readable form. */
static void
printlim(const char *name, rlim_t lim)
{

	fprintf(stderr, "Info: %s: ", name);
	if (lim == RLIM_INFINITY)
		fprintf(stderr, "unlimited\n");
	else
		fprintf(stderr, "%ju bytes\n", (uintmax_t)lim);
}

/* Report RLIMIT_MEMLOCK soft/hard limits; called after mlock() fails
 * to help diagnose why VSM could not be locked in memory. */
static void
printmemlock(void) {
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
/* Platform without RLIMIT_MEMLOCK: nothing to report. */
static void printmemlock(void) {}
#endif
300
301
/*
 * Create a new cluster: a randomly named backing file in the VSM
 * directory, rounded up to whole pages, preallocated and mapped
 * shared.
 *
 * The mapping is mlock()'ed — presumably to keep the shared memory
 * resident; failure is only warned about, once, with the current
 * RLIMIT_MEMLOCK limits printed for diagnosis.
 */
static struct vsmw_cluster *
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	static int warn = 0;	/* ensures the mlock warning fires once */
	int fd;
	size_t ps;

	vsmw_assert_lock();
	ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC);
	AN(vc);

	/* vsmw_mkent leaves a fresh unique filename in vsmw->vsb */
	vsmw_mkent(vsmw, pfx);
	REPLACE(vc->fn, VSB_data(vsmw->vsb));

	VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list);

	ps = getpagesize();
	len = RUP2(len, ps);	/* round up to page size */
	vc->len = len;

	fd = openat(vsmw->vdirfd, vc->fn,
	    O_RDWR | O_CREAT | O_EXCL, vsmw->mode);
	assert(fd >= 0);

	/* Preallocate so later writes cannot fail with SIGBUS/ENOSPC */
	AZ(VFIL_allocate(fd, (off_t)len, 1));

	vc->ptr = (void *)mmap(NULL, len,
	    PROT_READ|PROT_WRITE,
	    MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
	    fd, 0);

	closefd(&fd);
	assert(vc->ptr != MAP_FAILED);
	if (mlock(vc->ptr, len) && warn++ == 0)  {
		fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n",
		    VAS_errtxt(errno), errno);
		printmemlock();
	}

	return (vc);
}
343
344
struct vsmw_cluster *
345 2456
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
346
{
347
        struct vsmw_cluster *vc;
348
        struct vsmwseg *seg;
349
350 2456
        vsmw_do_lock();
351 2456
        vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
352 2456
        AN(vc);
353 2456
        vc->next += VSM_CLUSTER_OFFSET;
354
355 2456
        ALLOC_OBJ(seg, VSMWSEG_MAGIC);
356 2456
        AN(seg);
357 2456
        vc->cseg = seg;
358 2456
        seg->len = vc->len;
359 2456
        seg->cluster = vc;
360 2456
        REPLACE(seg->category, "");
361 2456
        REPLACE(seg->id, "");
362 2456
        vc->refs++;
363 2456
        vc->named = 1;
364 2456
        vsmw_addseg(vsmw, seg);
365
366 2456
        vsmw_do_unlock();
367 2456
        return (vc);
368
}
369
370
static void
371 4094
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc)
372
{
373
374 4094
        vsmw_assert_lock();
375 4094
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
376 4094
        CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);
377
378 4094
        AZ(vc->refs);
379
380 4094
        AZ(munmap(vc->ptr, vc->len));
381 4094
        if (vc->named)
382 156
                vsmw_delseg(vsmw, vc->cseg);
383 4094
        vc->cseg = 0;
384
385 4094
        VTAILQ_REMOVE(&vsmw->clusters, vc, list);
386 4094
        if (unlinkat(vsmw->vdirfd, vc->fn, 0))
387 0
                assert (errno == ENOENT);
388 4094
        REPLACE(vc->fn, NULL);
389 4094
        FREE_OBJ(vc);
390 4094
}
391
392
void
393 156
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
394
{
395
        struct vsmw_cluster *vc;
396
397 156
        TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);
398
399 156
        vsmw_do_lock();
400 156
        if (--vc->refs == 0)
401 86
                vsmw_DestroyCluster_locked(vsmw, vc);
402 156
        vsmw_do_unlock();
403 156
}
404
405
/*--------------------------------------------------------------------*/
406
407
/*
 * Allocate a VSM segment of 'payload' bytes in category 'category'.
 *
 * The segment id is built as "<prefix>.<fmt-expansion>" (just the
 * expansion when prefix is NULL; just the prefix when fmt expands to
 * nothing).  If vc is NULL a private cluster sized for this segment is
 * created.  Returns a pointer to the payload inside the mapped
 * cluster; release with VSMW_Free().
 */
void *
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t payload, const char *prefix,
    const char *fmt, va_list va)
{
	struct vsmwseg *seg;
	ssize_t l;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	REPLACE(seg->category, category);
	seg->len = PRNDUP(payload);	/* round up for alignment */

	/* Build the segment id in the scratch vsb */
	VSB_clear(vsmw->vsb);
	if (prefix != NULL) {
		assert(prefix[0] != '\0');
		VSB_cat(vsmw->vsb, prefix);
		if (fmt[0] != '\0')
			VSB_cat(vsmw->vsb, ".");
	}
	l = VSB_len(vsmw->vsb);
	assert(l >= 0);
	VSB_vprintf(vsmw->vsb, fmt, va);
	AZ(VSB_finish(vsmw->vsb));
	/* A non-empty fmt must have produced some output */
	assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));

	REPLACE(seg->id, VSB_data(vsmw->vsb));

	if (vc == NULL)
		vc = vsmw_newcluster(vsmw, seg->len, category);
	AN(vc);
	vc->refs++;

	/* Carve the segment out of the cluster's free tail */
	seg->cluster = vc;
	seg->off = vc->next;
	vc->next += seg->len;
	assert(vc->next <= vc->len);
	seg->ptr = seg->off + (char*)vc->ptr;

	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (seg->ptr);
}
454
455
/* Variadic convenience wrapper around VSMW_Allocv() (no prefix). */
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	void *ptr;
	va_list args;

	va_start(args, fmt);
	ptr = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, args);
	va_end(args);
	return (ptr);
}
467
468
/*--------------------------------------------------------------------*/
469
470
void
471 4068
VSMW_Free(struct vsmw *vsmw, void **pp)
472
{
473
        struct vsmwseg *seg;
474
        struct vsmw_cluster *cp;
475
476 4068
        vsmw_do_lock();
477 4068
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
478 4068
        AN(pp);
479 33180
        VTAILQ_FOREACH(seg, &vsmw->segs, list)
480 33180
                if (seg->ptr == *pp)
481 4068
                        break;
482 4068
        AN(seg);
483 4068
        *pp = NULL;
484
485 4068
        cp = seg->cluster;
486 4068
        CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
487 4068
        assert(cp->refs > 0);
488
489 4068
        vsmw_delseg(vsmw, seg);
490
491 4068
        if (!--cp->refs)
492 4008
                vsmw_DestroyCluster_locked(vsmw, cp);
493 4068
        vsmw_do_unlock();
494 4068
}
495
496
/*--------------------------------------------------------------------*/
497
498
struct vsmw *
499 3839
VSMW_New(int vdirfd, int mode, const char *idxname)
500
{
501
        struct vsmw *vsmw;
502
        int fd;
503
504 3839
        assert(vdirfd > 0);
505 3839
        assert(mode > 0);
506 3839
        AN(idxname);
507
508 3839
        vsmw_do_lock();
509 3839
        ALLOC_OBJ(vsmw, VSMW_MAGIC);
510 3839
        AN(vsmw);
511
512 3839
        VTAILQ_INIT(&vsmw->segs);
513 3839
        VTAILQ_INIT(&vsmw->clusters);
514 3839
        vsmw->vsb = VSB_new_auto();
515 3839
        AN(vsmw->vsb);
516 3839
        REPLACE(vsmw->idx, idxname);
517 3839
        vsmw->mode = mode;
518 3839
        vsmw->vdirfd = vdirfd;
519 3839
        vsmw->pid = getpid();
520 3839
        vsmw->birth = time(NULL);
521
522 3839
        if (unlinkat(vdirfd, vsmw->idx, 0))
523 3839
                assert (errno == ENOENT);
524 7678
        fd = openat(vdirfd,
525 3839
            vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
526 3839
        assert(fd >= 0);
527 3839
        vsmw_idx_head(vsmw, fd);
528 3839
        closefd(&fd);
529
530 3839
        vsmw_do_unlock();
531 3839
        return (vsmw);
532
}
533
534
void
535 1946
VSMW_Destroy(struct vsmw **pp)
536
{
537
        struct vsmw *vsmw;
538
        struct vsmwseg *seg, *s2;
539
540 1946
        vsmw_do_lock();
541 1946
        TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
542 11674
        VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
543 9728
                vsmw_delseg(vsmw, seg);
544 1946
        if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
545 0
                assert (errno == ENOENT);
546 1946
        REPLACE(vsmw->idx, NULL);
547 1946
        VSB_destroy(&vsmw->vsb);
548 1946
        closefd(&vsmw->vdirfd);
549 1946
        FREE_OBJ(vsmw);
550 1946
        vsmw_do_unlock();
551 1946
}