varnish-cache/bin/varnishd/common/common_vsmw.c
0
/*-
1
 * Copyright (c) 2010-2011 Varnish Software AS
2
 * All rights reserved.
3
 *
4
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
5
 *
6
 * SPDX-License-Identifier: BSD-2-Clause
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * VSM stuff common to manager and child.
30
 *
31
 */
32
33
#include "config.h"
34
35
#include <fcntl.h>
36
#include <stdarg.h>
37
#include <stdio.h>
38
#include <stdint.h>
39
#include <stdlib.h>
40
#include <string.h>
41
#include <time.h>
42
#include <unistd.h>
43
#include <sys/mman.h>
44
#include <sys/resource.h>
45
#include <sys/stat.h>
46
47
#include "vdef.h"
48
#include "vas.h"
49
#include "vsb.h"
50
#include "miniobj.h"
51
#include "vqueue.h"
52
53
#include "vfil.h"
54
#include "vrnd.h"
55
56
#include "heritage.h"
57
#include "vsmw.h"
58
59
#ifndef MAP_HASSEMAPHORE
60
#  define MAP_HASSEMAPHORE 0 /* XXX Linux */
61
#endif
62
63
#ifndef MAP_NOSYNC
64
#  define MAP_NOSYNC 0 /* XXX Linux */
65
#endif
66
67
/*
 * Default no-op lock callback.  Used for both vsmw_lock and vsmw_unlock
 * below until the owning process installs real callbacks.
 */
static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}
71
72
/* Set while the VSMW lock is held; checked by vsmw_assert_lock(). */
static int vsmw_haslock;

/* Lock hooks default to no-ops; callers replace them with real locking. */
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;

#define vsmw_assert_lock()	AN(vsmw_haslock)

#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)

/* Take the lock and assert it was not already held (no recursion).
 * NOTE(review): the f and l arguments are currently unused — presumably
 * kept for debug/trace instrumentation; confirm before removing. */
#define vsmw_do_lock_(f, l)				\
	do {					\
		vsmw_lock();			\
		AZ(vsmw_haslock);		\
		vsmw_haslock = 1;		\
	} while(0)

#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
/* Assert the lock is held, mark it released, then release it. */
#define vsmw_do_unlock_(f, l)				\
	do {					\
		AN(vsmw_haslock);		\
		vsmw_haslock = 0;		\
		vsmw_unlock();			\
	} while(0)
94
95
/*--------------------------------------------------------------------*/
96
97
/*
 * One mmap'ed shared-memory file, carved into segments.
 */
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;	/* on vsmw->clusters */
	struct vsmwseg			*cseg;	/* whole-cluster segment, when named */
	char				*fn;	/* file name under vsmw->vdirfd */
	size_t				len;	/* mapped length (page rounded) */
	void				*ptr;	/* mmap'ed memory */
	size_t				next;	/* allocation high-water offset */
	int				refs;	/* number of segments in cluster */
	int				named;	/* cseg was published in index */
};
110
111
/*
 * One allocation inside a cluster, as published in the index file.
 */
struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;	/* on vsmw->segs */
	struct vsmw_cluster		*cluster;	/* owning cluster */

	char				*category;	/* index "category" field */
	size_t				off;	/* byte offset into cluster */
	size_t				len;	/* segment length */
	char				*id;	/* index "id" field */
	void				*ptr;	/* address within cluster->ptr */
};
123
124
/*
 * A VSM writer: a directory of cluster files plus an index file
 * describing the live segments.
 */
struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;	/* fd of the VSM directory */
	int				mode;	/* file creation mode */
	char				*idx;	/* index file name */
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;	/* scratch buffer */
	pid_t				pid;	/* recorded in index header */
	time_t				birth;	/* recorded in index header */
	uint64_t			nsegs;	/* live segments */
	uint64_t			nsubs;	/* '-' records since last rewrite */
};
138
139
/* Allocations in clusters never start at offset zero */
/* (so a valid segment offset can never be mistaken for "unset") */
#define VSM_CLUSTER_OFFSET 16
141
142
/*--------------------------------------------------------------------*/
143
144
static void
145 1907
vsmw_idx_head(const struct vsmw *vsmw, int fd)
146
{
147
        char buf[64];
148
149 1907
        bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
150
        // XXX handle ENOSPC? #2764
151 1907
        assert(write(fd, buf, strlen(buf)) == strlen(buf));
152 1907
}
153
154
/*
 * Assert that a string destined for the index file is non-NULL and
 * contains no newline (the index format is line-oriented).
 *
 * Fix: dropped the trailing semicolon after "while (0)".  With the
 * semicolon baked into the macro, "if (c) ASSERT_SEG_STR(x); else ..."
 * would not compile; the do/while(0) idiom requires the caller to
 * supply the semicolon (CERT PRE10-C).
 */
#define ASSERT_SEG_STR(x) do {			\
		AN(x);				\
		AZ(strchr(x, '\n'));		\
	} while (0)
158
159
static void
160 56282
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
161
{
162
163 56282
        vsmw_assert_lock();
164 56282
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
165 56282
        CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
166 56282
        AN(seg->cluster);
167 56282
        ASSERT_SEG_STR(seg->category);
168 56282
        ASSERT_SEG_STR(seg->id);
169
170 112564
        VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
171 56282
            act,
172 56282
            seg->cluster->fn,
173 56282
            seg->off,
174 56282
            seg->len,
175 56282
            seg->category,
176 56282
            seg->id);
177 56282
}
178
179
/*--------------------------------------------------------------------*/
180
181
static void
182 48057
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
183
{
184
        int fd;
185
        uint64_t rn;
186
187 48057
        AN(pfx);
188 48057
        vsmw_assert_lock();
189 48057
        while (1) {
190 48057
                VSB_clear(vsmw->vsb);
191 48057
                VSB_printf(vsmw->vsb, "_.%s", pfx);
192 48057
                AZ(VRND_RandomCrypto(&rn, sizeof rn));
193 48057
                VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
194 48057
                AZ(VSB_finish(vsmw->vsb));
195 48057
                fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
196 48057
                if (fd < 0 && errno == ENOENT)
197 48057
                        return;
198 0
                if (fd >= 0)
199 0
                        closefd(&fd);
200
        }
201
}
202
203
/*--------------------------------------------------------------------*/
204
205
static void
206 56220
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
207
{
208
        int fd;
209
210 56220
        vsmw_assert_lock();
211 56220
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
212 56220
        CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
213 56220
        fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
214 56220
        assert(fd >= 0);
215 56220
        VSB_clear(vsmw->vsb);
216 56220
        vsmw_fmt_index(vsmw, seg, act);
217 56220
        AZ(VSB_finish(vsmw->vsb));
218 56220
        XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
219 56220
        closefd(&fd);
220 56220
}
221
222
/*--------------------------------------------------------------------*/
223
224
/*
 * Register a new segment: put it on the list, announce it with a '+'
 * record in the index file and bump the live-segment count.
 */
static void
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg)
{

	vsmw_assert_lock();
	VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list);
	vsmw_append_record(vsmw, seg, '+');
	vsmw->nsegs++;
}
233
234
/*--------------------------------------------------------------------*/
235
236
static void
237 6895
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
238
{
239 6895
        char *t = NULL;
240
        int fd;
241
        struct vsmwseg *s2;
242
243 6895
        vsmw_assert_lock();
244 6895
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
245 6895
        CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
246
247 6895
        VTAILQ_REMOVE(&vsmw->segs, seg, list);
248
249 6895
        vsmw->nsegs--;
250 6895
        if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
251 6885
                vsmw_append_record(vsmw, seg, '-');
252 6885
                vsmw->nsubs++;
253 6885
        } else {
254 10
                vsmw_mkent(vsmw, vsmw->idx);
255 10
                REPLACE(t, VSB_data(vsmw->vsb));
256 20
                fd = openat(vsmw->vdirfd,
257 10
                    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
258 10
                assert(fd >= 0);
259 10
                vsmw_idx_head(vsmw, fd);
260 10
                VSB_clear(vsmw->vsb);
261 72
                VTAILQ_FOREACH(s2, &vsmw->segs, list)
262 62
                        vsmw_fmt_index(vsmw, s2, '+');
263 10
                AZ(VSB_finish(vsmw->vsb));
264 10
                XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
265 10
                closefd(&fd);
266 10
                AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
267 10
                REPLACE(t, NULL);
268 10
                vsmw->nsubs = 0;
269
        }
270 6895
        REPLACE(seg->category, NULL);
271 6895
        REPLACE(seg->id, NULL);
272 6895
        FREE_OBJ(seg);
273 6895
}
274
275
/*--------------------------------------------------------------------*/
276
277
#ifdef RLIMIT_MEMLOCK
/* Report a single RLIMIT_MEMLOCK value on stderr. */
static void
printlim(const char *name, rlim_t lim)
{

	fprintf(stderr, "Info: %s: ", name);
	if (lim == RLIM_INFINITY) {
		fprintf(stderr, "unlimited\n");
	} else {
		fprintf(stderr, "%ju bytes\n", (uintmax_t)lim);
	}
}

/* Print the soft and hard locked-memory limits (mlock diagnostics). */
static void
printmemlock(void)
{
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
static void printmemlock(void) {}
#endif
300
301
static struct vsmw_cluster *
302 48047
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
303
{
304
        struct vsmw_cluster *vc;
305
        static int warn = 0;
306
        int fd;
307
        size_t ps;
308
309 48047
        vsmw_assert_lock();
310 48047
        ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC);
311 48047
        AN(vc);
312
313 48047
        vsmw_mkent(vsmw, pfx);
314 48047
        REPLACE(vc->fn, VSB_data(vsmw->vsb));
315
316 48047
        VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list);
317
318 48047
        ps = getpagesize();
319 48047
        len = RUP2(len, ps);
320 48047
        vc->len = len;
321
322 96094
        fd = openat(vsmw->vdirfd, vc->fn,
323 48047
            O_RDWR | O_CREAT | O_EXCL, vsmw->mode);
324 48047
        assert(fd >= 0);
325
326 48047
        AZ(VFIL_allocate(fd, (off_t)len, 1));
327
328 96094
        vc->ptr = (void *)mmap(NULL, len,
329
            PROT_READ|PROT_WRITE,
330
            MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
331 48047
            fd, 0);
332
333 48047
        closefd(&fd);
334 48047
        assert(vc->ptr != MAP_FAILED);
335 48047
        if (mlock(vc->ptr, len) && warn++ == 0)  {
336 1924
                fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n",
337 962
                    VAS_errtxt(errno), errno);
338 962
                printmemlock();
339 962
        }
340
341 48047
        return (vc);
342
}
343
344
struct vsmw_cluster *
345 1212
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
346
{
347
        struct vsmw_cluster *vc;
348
        struct vsmwseg *seg;
349
350 1212
        vsmw_do_lock();
351 1212
        vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
352 1212
        AN(vc);
353 1212
        vc->next += VSM_CLUSTER_OFFSET;
354
355 1212
        ALLOC_OBJ(seg, VSMWSEG_MAGIC);
356 1212
        AN(seg);
357 1212
        vc->cseg = seg;
358 1212
        seg->len = vc->len;
359 1212
        seg->cluster = vc;
360 1212
        REPLACE(seg->category, "");
361 1212
        REPLACE(seg->id, "");
362 1212
        vc->refs++;
363 1212
        vc->named = 1;
364 1212
        vsmw_addseg(vsmw, seg);
365
366 1212
        vsmw_do_unlock();
367 1212
        return (vc);
368
}
369
370
static void
371 2022
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc)
372
{
373
374 2022
        vsmw_assert_lock();
375 2022
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
376 2022
        CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);
377
378 2022
        AZ(vc->refs);
379
380 2022
        AZ(munmap(vc->ptr, vc->len));
381 2022
        if (vc->named)
382 77
                vsmw_delseg(vsmw, vc->cseg);
383 2022
        vc->cseg = 0;
384
385 2022
        VTAILQ_REMOVE(&vsmw->clusters, vc, list);
386 2022
        if (unlinkat(vsmw->vdirfd, vc->fn, 0))
387 0
                assert (errno == ENOENT);
388 2022
        REPLACE(vc->fn, NULL);
389 2022
        FREE_OBJ(vc);
390 2022
}
391
392
void
393 77
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
394
{
395
        struct vsmw_cluster *vc;
396
397 77
        TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);
398
399 77
        vsmw_do_lock();
400 77
        if (--vc->refs == 0)
401 43
                vsmw_DestroyCluster_locked(vsmw, vc);
402 77
        vsmw_do_unlock();
403 77
}
404
405
/*--------------------------------------------------------------------*/
406
407
void *
408 48123
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
409
    const char *category, size_t payload, const char *prefix,
410
    const char *fmt, va_list va)
411
{
412
        struct vsmwseg *seg;
413
        ssize_t l;
414
415 48123
        vsmw_do_lock();
416 48123
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
417
418 48123
        ALLOC_OBJ(seg, VSMWSEG_MAGIC);
419 48123
        AN(seg);
420 48123
        REPLACE(seg->category, category);
421 48123
        seg->len = PRNDUP(payload);
422
423 48123
        VSB_clear(vsmw->vsb);
424 48123
        if (prefix != NULL) {
425 34949
                assert(prefix[0] != '\0');
426 34949
                VSB_cat(vsmw->vsb, prefix);
427 34949
                if (fmt[0] != '\0')
428 32116
                        VSB_cat(vsmw->vsb, ".");
429 34949
        }
430 48123
        l = VSB_len(vsmw->vsb);
431 48123
        assert(l >= 0);
432 48123
        VSB_vprintf(vsmw->vsb, fmt, va);
433 48123
        AZ(VSB_finish(vsmw->vsb));
434 48123
        assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));
435
436 48123
        REPLACE(seg->id, VSB_data(vsmw->vsb));
437
438 48123
        if (vc == NULL)
439 46835
                vc = vsmw_newcluster(vsmw, seg->len, category);
440 48123
        AN(vc);
441 48123
        vc->refs++;
442
443 48123
        seg->cluster = vc;
444 48123
        seg->off = vc->next;
445 48123
        vc->next += seg->len;
446 48123
        assert(vc->next <= vc->len);
447 48123
        seg->ptr = seg->off + (char*)vc->ptr;
448
449 48123
        vsmw_addseg(vsmw, seg);
450
451 48123
        vsmw_do_unlock();
452 48123
        return (seg->ptr);
453
}
454
455
/*
 * Public: varargs convenience front-end for VSMW_Allocv(), without a
 * prefix.
 */
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	void *ptr;
	va_list ap;

	va_start(ap, fmt);
	ptr = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, ap);
	va_end(ap);
	return (ptr);
}
467
468
/*--------------------------------------------------------------------*/
469
470
void
471 2009
VSMW_Free(struct vsmw *vsmw, void **pp)
472
{
473
        struct vsmwseg *seg;
474
        struct vsmw_cluster *cp;
475
476 2009
        vsmw_do_lock();
477 2009
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
478 2009
        AN(pp);
479 16381
        VTAILQ_FOREACH(seg, &vsmw->segs, list)
480 16381
                if (seg->ptr == *pp)
481 2009
                        break;
482 2009
        AN(seg);
483 2009
        *pp = NULL;
484
485 2009
        cp = seg->cluster;
486 2009
        CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
487 2009
        assert(cp->refs > 0);
488
489 2009
        vsmw_delseg(vsmw, seg);
490
491 2009
        if (!--cp->refs)
492 1979
                vsmw_DestroyCluster_locked(vsmw, cp);
493 2009
        vsmw_do_unlock();
494 2009
}
495
496
/*--------------------------------------------------------------------*/
497
498
struct vsmw *
499 1897
VSMW_New(int vdirfd, int mode, const char *idxname)
500
{
501
        struct vsmw *vsmw;
502
        int fd;
503
504 1897
        assert(vdirfd > 0);
505 1897
        assert(mode > 0);
506 1897
        AN(idxname);
507
508 1897
        vsmw_do_lock();
509 1897
        ALLOC_OBJ(vsmw, VSMW_MAGIC);
510 1897
        AN(vsmw);
511
512 1897
        VTAILQ_INIT(&vsmw->segs);
513 1897
        VTAILQ_INIT(&vsmw->clusters);
514 1897
        vsmw->vsb = VSB_new_auto();
515 1897
        AN(vsmw->vsb);
516 1897
        REPLACE(vsmw->idx, idxname);
517 1897
        vsmw->mode = mode;
518 1897
        vsmw->vdirfd = vdirfd;
519 1897
        vsmw->pid = getpid();
520 1897
        vsmw->birth = time(NULL);
521
522 1897
        if (unlinkat(vdirfd, vsmw->idx, 0))
523 1897
                assert (errno == ENOENT);
524 3794
        fd = openat(vdirfd,
525 1897
            vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
526 1897
        assert(fd >= 0);
527 1897
        vsmw_idx_head(vsmw, fd);
528 1897
        closefd(&fd);
529
530 1897
        vsmw_do_unlock();
531 1897
        return (vsmw);
532
}
533
534
void
535 962
VSMW_Destroy(struct vsmw **pp)
536
{
537
        struct vsmw *vsmw;
538
        struct vsmwseg *seg, *s2;
539
540 962
        vsmw_do_lock();
541 962
        TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
542 5771
        VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
543 4809
                vsmw_delseg(vsmw, seg);
544 962
        if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
545 0
                assert (errno == ENOENT);
546 962
        REPLACE(vsmw->idx, NULL);
547 962
        VSB_destroy(&vsmw->vsb);
548 962
        closefd(&vsmw->vdirfd);
549 962
        FREE_OBJ(vsmw);
550 962
        vsmw_do_unlock();
551 962
}