| | varnish-cache/bin/varnishd/common/common_vsmw.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2010-2011 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* VSM stuff common to manager and child. |
30 |
|
* |
31 |
|
*/ |
32 |
|
|
33 |
|
#include "config.h"

#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
46 |
|
|
47 |
|
#include "vdef.h" |
48 |
|
#include "vas.h" |
49 |
|
#include "vsb.h" |
50 |
|
#include "miniobj.h" |
51 |
|
#include "vqueue.h" |
52 |
|
|
53 |
|
#include "vfil.h" |
54 |
|
#include "vrnd.h" |
55 |
|
|
56 |
|
#include "heritage.h" |
57 |
|
#include "vsmw.h" |
58 |
|
|
59 |
|
#ifndef MAP_HASSEMAPHORE |
60 |
|
# define MAP_HASSEMAPHORE 0 /* XXX Linux */ |
61 |
|
#endif |
62 |
|
|
63 |
|
#ifndef MAP_NOSYNC |
64 |
|
# define MAP_NOSYNC 0 /* XXX Linux */ |
65 |
|
#endif |
66 |
|
|
67 |
|
/* Default no-op lock callback: installed in both function pointers
 * below until the process provides a real lock implementation. */
static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}
71 |
|
|
72 |
|
/* Lock hooks and held-flag.  The manager/child install real callbacks
 * into vsmw_lock/vsmw_unlock; vsmw_haslock tracks (single-threaded)
 * ownership so the invariants below can be asserted. */
static int vsmw_haslock;
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;

/* Assert that the caller holds the logical VSMW lock. */
#define vsmw_assert_lock()	AN(vsmw_haslock)

#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)

/* NB: the (f, l) arguments are currently unused; they carry the
 * caller's function/line for potential debugging/tracing. */
#define vsmw_do_lock_(f, l)			\
	do {					\
		vsmw_lock();			\
		AZ(vsmw_haslock);		\
		vsmw_haslock = 1;		\
	} while(0)

#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
#define vsmw_do_unlock_(f, l)			\
	do {					\
		AN(vsmw_haslock);		\
		vsmw_haslock = 0;		\
		vsmw_unlock();			\
	} while(0)
94 |
|
|
95 |
|
/*--------------------------------------------------------------------*/ |
96 |
|
|
97 |
|
/* One mmap'ed backing file in the VSM directory, from which individual
 * segments are carved sequentially (see the 'next' offset). */
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;
	struct vsmwseg			*cseg;	/* cluster's own segment, when named */
	char				*fn;	/* filename, relative to vdirfd */
	size_t				len;	/* mapped length, page-rounded */
	void				*ptr;	/* mmap'ed address */
	size_t				next;	/* next free offset in the map */
	int				refs;	/* segments referencing this cluster */
	int				named;	/* published via cseg in the index */
};
110 |
|
|
111 |
|
/* One allocation inside a cluster, as published in the index file. */
struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;
	struct vsmw_cluster		*cluster;	/* owning cluster */

	char				*category;	/* VSM class string */
	size_t				off;		/* offset into cluster */
	size_t				len;		/* rounded payload length */
	char				*id;		/* instance identifier */
	void				*ptr;		/* payload address */
};
123 |
|
|
124 |
|
/* Writer state for one VSM directory. */
struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;	/* VSM directory fd */
	int				mode;	/* creation mode for new files */
	char				*idx;	/* index file name */
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;	/* scratch string buffer */
	pid_t				pid;	/* writer pid, in index header */
	time_t				birth;	/* writer start time, in index header */
	uint64_t			nsegs;	/* live segments */
	uint64_t			nsubs;	/* '-' records since last index rewrite */
};
138 |
|
|
139 |
|
/* Allocations in clusters never start at offset zero */ |
140 |
|
#define VSM_CLUSTER_OFFSET 16 |
141 |
|
|
142 |
|
/*--------------------------------------------------------------------*/ |
143 |
|
|
144 |
|
/*
 * Write the "# <pid> <birth>" header line that starts every index
 * file, letting readers detect a replaced writer process.
 */
static void
vsmw_idx_head(const struct vsmw *vsmw, int fd)
{
	char buf[64];

	bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
	// XXX handle ENOSPC? #2764
	/* NB: the write(2) lives inside assert(); Varnish builds with
	 * assertions always enabled (vas.h), so it is not compiled out. */
	assert(write(fd, buf, strlen(buf)) == strlen(buf));
}
153 |
|
|
154 |
|
/*
 * Sanity-check a string destined for the index file: it must be
 * non-NULL and must not contain a newline, which would corrupt the
 * line-oriented index format.
 *
 * Fix: drop the trailing semicolon after while (0) -- the semicolon
 * belongs to the invocation site, and embedding it in the macro
 * breaks use in an unbraced if/else.
 */
#define ASSERT_SEG_STR(x) do {		\
		AN(x);			\
		AZ(strchr(x, '\n'));	\
	} while (0)
158 |
|
|
159 |
|
/*
 * Format one index record into vsmw->vsb:
 *   "<act> <file> <offset> <length> <category> <id>\n"
 * where act is '+' (segment added) or '-' (segment deleted).
 * This line format is the contract with VSM readers.
 * Lock must be held.
 */
static void
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	AN(seg->cluster);
	ASSERT_SEG_STR(seg->category);
	ASSERT_SEG_STR(seg->id);

	VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
	    act,
	    seg->cluster->fn,
	    seg->off,
	    seg->len,
	    seg->category,
	    seg->id);
}
178 |
|
|
179 |
|
/*--------------------------------------------------------------------*/ |
180 |
|
|
181 |
|
/*
 * Generate a filename "_.<pfx>.<random64>" in vsmw->vsb which does
 * not yet exist in the VSM directory, retrying on the (astronomically
 * unlikely) collision.  The result is left in vsmw->vsb.
 * Lock must be held.
 */
static void
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
{
	int fd;
	uint64_t rn;

	AN(pfx);
	vsmw_assert_lock();
	while (1) {
		VSB_clear(vsmw->vsb);
		VSB_printf(vsmw->vsb, "_.%s", pfx);
		AZ(VRND_RandomCrypto(&rn, sizeof rn));
		VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
		AZ(VSB_finish(vsmw->vsb));
		/* Probe for existence; ENOENT means the name is free. */
		fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
		if (fd < 0 && errno == ENOENT)
			return;
		if (fd >= 0)
			closefd(&fd);
	}
}
202 |
|
|
203 |
|
/*--------------------------------------------------------------------*/ |
204 |
|
|
205 |
|
/*
 * Append a single index record ('+' or '-') for seg to the index
 * file.  Opens/closes the index per call; the O_APPEND write keeps
 * the record atomic with respect to readers.  Lock must be held.
 */
static void
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
{
	int fd;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
	assert(fd >= 0);
	VSB_clear(vsmw->vsb);
	vsmw_fmt_index(vsmw, seg, act);
	AZ(VSB_finish(vsmw->vsb));
	XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
	closefd(&fd);
}
221 |
|
|
222 |
|
/*--------------------------------------------------------------------*/ |
223 |
|
|
224 |
|
/*
 * Register a new segment: put it on the list and publish a '+'
 * record in the index file.  Lock must be held.
 */
static void
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg)
{

	vsmw_assert_lock();
	VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list);
	vsmw_append_record(vsmw, seg, '+');
	vsmw->nsegs++;
}
233 |
|
|
234 |
|
/*--------------------------------------------------------------------*/ |
235 |
|
|
236 |
|
/*
 * Unregister a segment and free it.  Normally this just appends a
 * '-' record to the index, but once the number of '-' records grows
 * past both 10 and half the live segment count, the whole index is
 * compacted: rewritten to a fresh temp file containing only '+'
 * records for the surviving segments, then atomically renamed over
 * the old index.  Lock must be held.
 */
static void
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
{
	char *t = NULL;
	int fd;
	struct vsmwseg *s2;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);

	VTAILQ_REMOVE(&vsmw->segs, seg, list);

	vsmw->nsegs--;
	if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
		/* Cheap path: just log the deletion. */
		vsmw_append_record(vsmw, seg, '-');
		vsmw->nsubs++;
	} else {
		/* Compaction: rewrite the index from scratch. */
		vsmw_mkent(vsmw, vsmw->idx);
		REPLACE(t, VSB_data(vsmw->vsb));
		fd = openat(vsmw->vdirfd,
		    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
		assert(fd >= 0);
		vsmw_idx_head(vsmw, fd);
		VSB_clear(vsmw->vsb);
		VTAILQ_FOREACH(s2, &vsmw->segs, list)
			vsmw_fmt_index(vsmw, s2, '+');
		AZ(VSB_finish(vsmw->vsb));
		XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
		closefd(&fd);
		/* Atomic replacement keeps readers consistent. */
		AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
		REPLACE(t, NULL);
		vsmw->nsubs = 0;
	}
	REPLACE(seg->category, NULL);
	REPLACE(seg->id, NULL);
	FREE_OBJ(seg);
}
274 |
|
|
275 |
|
/*--------------------------------------------------------------------*/ |
276 |
|
|
277 |
|
#ifdef RLIMIT_MEMLOCK
/* Report one locked-memory limit value on stderr. */
static void
printlim(const char *name, rlim_t lim)
{

	if (lim == RLIM_INFINITY)
		fprintf(stderr, "Info: %s: unlimited\n", name);
	else
		fprintf(stderr, "Info: %s: %ju bytes\n",
		    name, (uintmax_t)lim);
}

/* Print the soft and hard RLIMIT_MEMLOCK limits, as a diagnostic
 * hint when mlock() of the VSM fails. */
static void
printmemlock(void)
{
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
static void printmemlock(void) {}
#endif
300 |
|
|
301 |
|
/*
 * Create a new cluster: pick a fresh random filename, create the
 * backing file, preallocate it to a page-rounded length, mmap it
 * shared, and try to mlock the mapping, warning (once per process)
 * if that fails.  Lock must be held.
 */
static struct vsmw_cluster *
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	static int warn = 0;	/* only warn about mlock once */
	int fd;
	size_t ps;

	vsmw_assert_lock();
	ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC);
	AN(vc);

	vsmw_mkent(vsmw, pfx);
	REPLACE(vc->fn, VSB_data(vsmw->vsb));

	VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list);

	ps = getpagesize();
	len = RUP2(len, ps);	/* round up to whole pages */
	vc->len = len;

	fd = openat(vsmw->vdirfd, vc->fn,
	    O_RDWR | O_CREAT | O_EXCL, vsmw->mode);
	assert(fd >= 0);

	AZ(VFIL_allocate(fd, (off_t)len, 1));

	vc->ptr = (void *)mmap(NULL, len,
	    PROT_READ|PROT_WRITE,
	    MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
	    fd, 0);

	closefd(&fd);
	assert(vc->ptr != MAP_FAILED);
	if (mlock(vc->ptr, len) && warn++ == 0) {
		fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n",
		    VAS_errtxt(errno), errno);
		printmemlock();
	}

	return (vc);
}
343 |
|
|
344 |
|
/*
 * Public: create a cluster which is itself published in the index as
 * a segment with empty category/id, so readers can map the whole
 * cluster.  The first VSM_CLUSTER_OFFSET bytes are reserved and
 * never handed out to allocations.
 */
struct vsmw_cluster *
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	struct vsmwseg *seg;

	vsmw_do_lock();
	vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
	AN(vc);
	vc->next += VSM_CLUSTER_OFFSET;

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	vc->cseg = seg;
	seg->len = vc->len;
	seg->cluster = vc;
	REPLACE(seg->category, "");
	REPLACE(seg->id, "");
	vc->refs++;		/* the named segment holds a reference */
	vc->named = 1;
	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (vc);
}
369 |
|
|
370 |
|
static void |
371 |
79145 |
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc) |
372 |
|
{ |
373 |
|
|
374 |
79145 |
vsmw_assert_lock(); |
375 |
79145 |
CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC); |
376 |
79145 |
CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC); |
377 |
|
|
378 |
79145 |
AZ(vc->refs); |
379 |
|
|
380 |
79145 |
AZ(munmap(vc->ptr, vc->len)); |
381 |
79145 |
if (vc->named) |
382 |
3105 |
vsmw_delseg(vsmw, vc->cseg); |
383 |
79145 |
vc->cseg = 0; |
384 |
|
|
385 |
79145 |
VTAILQ_REMOVE(&vsmw->clusters, vc, list); |
386 |
79145 |
if (unlinkat(vsmw->vdirfd, vc->fn, 0)) |
387 |
0 |
assert (errno == ENOENT); |
388 |
79145 |
REPLACE(vc->fn, NULL); |
389 |
79145 |
FREE_OBJ(vc); |
390 |
79145 |
} |
391 |
|
|
392 |
|
/*
 * Public: drop the caller's reference on a cluster, destroying it
 * when this was the last reference.  Clears *vsmcp.
 */
void
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
{
	struct vsmw_cluster *vc;

	TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);

	vsmw_do_lock();
	if (--vc->refs == 0)
		vsmw_DestroyCluster_locked(vsmw, vc);
	vsmw_do_unlock();
}
404 |
|
|
405 |
|
/*--------------------------------------------------------------------*/ |
406 |
|
|
407 |
|
/*
 * Public: allocate a VSM segment of 'payload' bytes (rounded up by
 * PRNDUP) in cluster vc, or in a fresh single-segment cluster when
 * vc is NULL.  The segment id is "<prefix>.<fmt-expansion>" (the
 * dot only when fmt is non-empty), or just the fmt expansion when
 * prefix is NULL.  Returns a pointer to the payload.
 */
void *
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t payload, const char *prefix,
    const char *fmt, va_list va)
{
	struct vsmwseg *seg;
	ssize_t l;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	REPLACE(seg->category, category);
	seg->len = PRNDUP(payload);

	/* Build the segment id in the scratch buffer. */
	VSB_clear(vsmw->vsb);
	if (prefix != NULL) {
		assert(prefix[0] != '\0');
		VSB_cat(vsmw->vsb, prefix);
		if (fmt[0] != '\0')
			VSB_cat(vsmw->vsb, ".");
	}
	l = VSB_len(vsmw->vsb);
	assert(l >= 0);
	VSB_vprintf(vsmw->vsb, fmt, va);
	AZ(VSB_finish(vsmw->vsb));
	/* A non-empty fmt must have contributed characters. */
	assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));

	REPLACE(seg->id, VSB_data(vsmw->vsb));

	if (vc == NULL)
		vc = vsmw_newcluster(vsmw, seg->len, category);
	AN(vc);
	vc->refs++;

	/* Carve the segment from the cluster's free tail. */
	seg->cluster = vc;
	seg->off = vc->next;
	vc->next += seg->len;
	assert(vc->next <= vc->len);
	seg->ptr = seg->off + (char*)vc->ptr;

	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (seg->ptr);
}
454 |
|
|
455 |
|
/*
 * Public: printf-style convenience wrapper around VSMW_Allocv(),
 * with no id prefix.
 */
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	va_list ap;
	void *p;

	va_start(ap, fmt);
	p = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, ap);
	va_end(ap);
	return (p);
}
467 |
|
|
468 |
|
/*--------------------------------------------------------------------*/ |
469 |
|
|
470 |
|
/*
 * Public: free an allocation returned by VSMW_Allocv()/VSMW_Allocf().
 * The pointer is located by linear search over all segments (asserts
 * if not found), *pp is cleared, and the owning cluster is destroyed
 * when its last reference goes away.
 */
void
VSMW_Free(struct vsmw *vsmw, void **pp)
{
	struct vsmwseg *seg;
	struct vsmw_cluster *cp;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	AN(pp);
	VTAILQ_FOREACH(seg, &vsmw->segs, list)
		if (seg->ptr == *pp)
			break;
	AN(seg);
	*pp = NULL;

	cp = seg->cluster;
	CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
	assert(cp->refs > 0);

	vsmw_delseg(vsmw, seg);

	if (!--cp->refs)
		vsmw_DestroyCluster_locked(vsmw, cp);
	vsmw_do_unlock();
}
495 |
|
|
496 |
|
/*--------------------------------------------------------------------*/ |
497 |
|
|
498 |
|
/*
 * Public: create a writer for the VSM directory open at vdirfd.
 * Any stale index file is removed, and a fresh one named idxname is
 * created with the pid/birth header.  Takes ownership of vdirfd.
 */
struct vsmw *
VSMW_New(int vdirfd, int mode, const char *idxname)
{
	struct vsmw *vsmw;
	int fd;

	assert(vdirfd > 0);
	assert(mode > 0);
	AN(idxname);

	vsmw_do_lock();
	ALLOC_OBJ(vsmw, VSMW_MAGIC);
	AN(vsmw);

	VTAILQ_INIT(&vsmw->segs);
	VTAILQ_INIT(&vsmw->clusters);
	vsmw->vsb = VSB_new_auto();
	AN(vsmw->vsb);
	REPLACE(vsmw->idx, idxname);
	vsmw->mode = mode;
	vsmw->vdirfd = vdirfd;
	vsmw->pid = getpid();
	vsmw->birth = time(NULL);

	/* Tolerate the index not existing yet. */
	if (unlinkat(vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	fd = openat(vdirfd,
	    vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
	assert(fd >= 0);
	vsmw_idx_head(vsmw, fd);
	closefd(&fd);

	vsmw_do_unlock();
	return (vsmw);
}
533 |
|
|
534 |
|
/*
 * Public: destroy a writer.  Frees all remaining segments, removes
 * the index file, releases the scratch buffer and the directory fd,
 * and clears *pp.
 */
void
VSMW_Destroy(struct vsmw **pp)
{
	struct vsmw *vsmw;
	struct vsmwseg *seg, *s2;

	vsmw_do_lock();
	TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
	VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
		vsmw_delseg(vsmw, seg);
	if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	REPLACE(vsmw->idx, NULL);
	VSB_destroy(&vsmw->vsb);
	closefd(&vsmw->vdirfd);
	FREE_OBJ(vsmw);
	vsmw_do_unlock();
}