/* varnish-cache/bin/varnishd/storage/storage_persistent.c */

/*-
 * Copyright (c) 2008-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Persistent storage method
 *
 * XXX: Before we start the client or maybe after it stops, we should give the
 * XXX: stevedores a chance to examine their storage for consistency.
 *
 * XXX: Do we ever free the LRU-lists ?
 */

#include "config.h"

#include "cache/cache_varnishd.h"

#include <sys/mman.h>

#include <stdio.h>
#include <stdlib.h>

#include "cache/cache_obj.h"
#include "cache/cache_objhead.h"
#include "storage/storage.h"
#include "storage/storage_simple.h"

#include "vcli_serve.h"
#include "vsha256.h"
#include "vtim.h"

#include "storage/storage_persistent.h"

static struct obj_methods smp_oc_realmethods;

static struct VSC_lck *lck_smp;

static void smp_init(void);

/*--------------------------------------------------------------------*/

/*
 * silos is unlocked, it only changes during startup when we are
 * single-threaded
 */
static VTAILQ_HEAD(,smp_sc)	silos = VTAILQ_HEAD_INITIALIZER(silos);

/*--------------------------------------------------------------------
 * Add bans to silos
 */

static int
smp_appendban(const struct smp_sc *sc, struct smp_signspace *spc,
    uint32_t len, const uint8_t *ban)
{

	(void)sc;
	if (SIGNSPACE_FREE(spc) < len)
		return (-1);

	memcpy(SIGNSPACE_FRONT(spc), ban, len);
	smp_append_signspace(spc, len);

	return (0);
}

/* Trust that cache_ban.c takes care of locking */

static int
smp_baninfo(const struct stevedore *stv, enum baninfo event,
    const uint8_t *ban, unsigned len)
{
	struct smp_sc *sc;
	int r = 0;

	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);

	switch (event) {
	case BI_NEW:
		r |= smp_appendban(sc, &sc->ban1, len, ban);
		r |= smp_appendban(sc, &sc->ban2, len, ban);
		break;
	default:
		/* Ignored */
		break;
	}

	return (r);
}
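
/*
 * Note: new bans are appended to both the ban1 and ban2 signspaces, so
 * that if the silo is interrupted while one copy is being rewritten,
 * the other copy still holds a usable ban list (see the fallback logic
 * in smp_open() below).
 */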

static void
smp_banexport_spc(struct smp_signspace *spc, const uint8_t *bans, unsigned len)
{
	smp_reset_signspace(spc);
	assert(SIGNSPACE_FREE(spc) >= len);
	memcpy(SIGNSPACE_DATA(spc), bans, len);
	smp_append_signspace(spc, len);
	smp_sync_sign(&spc->ctx);
}

static void
smp_banexport(const struct stevedore *stv, const uint8_t *bans, unsigned len)
{
	struct smp_sc *sc;

	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
	smp_banexport_spc(&sc->ban1, bans, len);
	smp_banexport_spc(&sc->ban2, bans, len);
}
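
/*
 * Note: unlike smp_baninfo(), which appends incrementally, banexport
 * replaces the persisted ban list wholesale: each signspace is reset,
 * rewritten with the complete exported list and re-signed.
 */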

/*--------------------------------------------------------------------
 * Attempt to open and read in a ban list
 */

static int
smp_open_bans(const struct smp_sc *sc, struct smp_signspace *spc)
{
	uint8_t *ptr, *pe;
	int i;

	ASSERT_CLI();
	(void)sc;
	i = smp_chk_signspace(spc);
	if (i)
		return (i);

	ptr = SIGNSPACE_DATA(spc);
	pe = SIGNSPACE_FRONT(spc);
	BAN_Reload(ptr, pe - ptr);

	return (0);
}

/*--------------------------------------------------------------------
 * Attempt to open and read in a segment list
 */

static int
smp_open_segs(struct smp_sc *sc, struct smp_signspace *spc)
{
	uint64_t length, l;
	struct smp_segptr *ss, *se;
	struct smp_seg *sg, *sg1, *sg2;
	int i, n = 0;

	ASSERT_CLI();
	i = smp_chk_signspace(spc);
	if (i)
		return (i);

	ss = SIGNSPACE_DATA(spc);
	length = SIGNSPACE_LEN(spc);

	if (length == 0) {
		/* No segments */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		return (0);
	}
	se = ss + length / sizeof *ss;
	se--;
	assert(ss <= se);

	/*
	 * Locate the free reserve, there are only two basic cases,
	 * but once we start dropping segments, things get more complicated.
	 */

	sc->free_offset = se->offset + se->length;
	l = sc->mediasize - sc->free_offset;
	if (se->offset > ss->offset && l >= sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * Plenty of space at tail, do nothing.
		 */
	} else if (ss->offset > se->offset) {
		/*
		 * [zzzz____xxxxyyyy_]
		 * (make) space between ends
		 * We might nuke the entire tail end without getting
		 * enough space, in which case we fall through to the
		 * last check.
		 */
		while (ss < se && ss->offset > se->offset) {
			l = ss->offset - (se->offset + se->length);
			if (l > sc->free_reserve)
				break;
			ss++;
			n++;
		}
	}

	if (l < sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * (make) space at front
		 */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		while (ss < se) {
			l = ss->offset - sc->free_offset;
			if (l > sc->free_reserve)
				break;
			ss++;
			n++;
		}
	}

	assert(l >= sc->free_reserve);


	sg1 = NULL;
	sg2 = NULL;
	for (; ss <= se; ss++) {
		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
		AN(sg);
		VTAILQ_INIT(&sg->objcores);
		sg->p = *ss;

		sg->flags |= SMP_SEG_MUSTLOAD;

		/*
		 * HACK: prevent save_segs from nuking segment until we have
		 * HACK: loaded it.
		 */
		sg->nobj = 1;
		if (sg1 != NULL) {
			assert(sg1->p.offset != sg->p.offset);
			if (sg1->p.offset < sg->p.offset)
				assert(smp_segend(sg1) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg1->p.offset);
		}
		if (sg2 != NULL) {
			assert(sg2->p.offset != sg->p.offset);
			if (sg2->p.offset < sg->p.offset)
				assert(smp_segend(sg2) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg2->p.offset);
		}

		/* XXX: check that they are inside silo */
		/* XXX: check that they don't overlap */
		/* XXX: check that they are serial */
		sg->sc = sc;
		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
		sg2 = sg;
		if (sg1 == NULL)
			sg1 = sg;
	}
	printf("Dropped %d segments to make free_reserve\n", n);
	return (0);
}
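
/*
 * Note: "dropping" a segment above only means skipping its segptr
 * entry, so the segment is no longer indexed and its space will be
 * overwritten by later allocations; nothing is erased from the silo
 * here.
 */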

/*--------------------------------------------------------------------
 * Silo worker thread
 */

static void * v_matchproto_(bgthread_t)
smp_thread(struct worker *wrk, void *priv)
{
	struct smp_sc *sc;
	struct smp_seg *sg;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
	sc->thread = pthread_self();

	/* First, load all the objects from all segments */
	VTAILQ_FOREACH(sg, &sc->segments, list)
		if (sg->flags & SMP_SEG_MUSTLOAD)
			smp_load_seg(wrk, sc, sg);

	sc->flags |= SMP_SC_LOADED;
	BAN_Release();
	printf("Silo completely loaded\n");

	/* Housekeeping loop */
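	/*
	 * Note: the loop below wakes up roughly every 1.14 s (pi - 2);
	 * whenever the oldest segment no longer holds live objects, the
	 * updated segment list is persisted via smp_save_segs().
	 */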
	Lck_Lock(&sc->mtx);
	while (!(sc->flags & SMP_SC_STOP)) {
		sg = VTAILQ_FIRST(&sc->segments);
		if (sg != NULL && sg != sc->cur_seg && sg->nobj == 0)
			smp_save_segs(sc);

		Lck_Unlock(&sc->mtx);
		VTIM_sleep(3.14159265359 - 2);
		Lck_Lock(&sc->mtx);
	}

	smp_save_segs(sc);

	Lck_Unlock(&sc->mtx);
	pthread_exit(0);

	NEEDLESS(return (NULL));
}

/*--------------------------------------------------------------------
 * Open a silo in the worker process
 */

static void v_matchproto_(storage_open_f)
smp_open(struct stevedore *st)
{
	struct smp_sc *sc;

	ASSERT_CLI();

	if (VTAILQ_EMPTY(&silos))
		smp_init();

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);

	Lck_New(&sc->mtx, lck_smp);
	Lck_Lock(&sc->mtx);

	sc->stevedore = st;

	/* We trust the parent to give us a valid silo, for good measure: */
	AZ(smp_valid_silo(sc));
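
	/*
	 * Note: the first 4k page of the silo holds the identity block;
	 * remapping it read-only presumably guards it against stray
	 * writes from the worker process.
	 */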
	AZ(mprotect((void*)sc->base, 4096, PROT_READ));

	sc->ident = SIGN_DATA(&sc->idn);

	/* Check ban lists */
	if (smp_chk_signspace(&sc->ban1)) {
		/* Ban list 1 is broken, use ban2 */
		AZ(smp_chk_signspace(&sc->ban2));
		smp_copy_signspace(&sc->ban1, &sc->ban2);
		smp_sync_sign(&sc->ban1.ctx);
	} else {
		/* Ban1 is OK, copy to ban2 for consistency */
		smp_copy_signspace(&sc->ban2, &sc->ban1);
		smp_sync_sign(&sc->ban2.ctx);
	}
	AZ(smp_open_bans(sc, &sc->ban1));

	/* We attempt seg1 first, and if that fails, try seg2 */
	if (smp_open_segs(sc, &sc->seg1))
		AZ(smp_open_segs(sc, &sc->seg2));

	/*
	 * Grab a reference to the tail of the ban list, until the thread
	 * has loaded all objects, so we can be sure that all of our
	 * proto-bans survive until then.
	 */
	BAN_Hold();

	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */

	/* XXX: abandon early segments to make sure we have free space ? */

	(void)ObjSubscribeEvents(smp_oc_event, st,
	    OEV_BANCHG|OEV_TTLCHG|OEV_INSERT);

	/* Open a new segment, so we are ready to write */
	smp_new_seg(sc);

	/* Start the silo worker thread, it will load the objects */
	WRK_BgThread(&sc->bgthread, "persistence", smp_thread, sc);

	VTAILQ_INSERT_TAIL(&silos, sc, list);
	Lck_Unlock(&sc->mtx);
}

/*--------------------------------------------------------------------
 * Close a silo
 */

static void v_matchproto_(storage_close_f)
smp_close(const struct stevedore *st, int warn)
{
	struct smp_sc *sc;
	void *status;

	ASSERT_CLI();

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
	if (warn) {
		Lck_Lock(&sc->mtx);
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		AZ(sc->cur_seg);
		sc->flags |= SMP_SC_STOP;
		Lck_Unlock(&sc->mtx);
	} else {
		PTOK(pthread_join(sc->bgthread, &status));
		AZ(status);
	}
}
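
/*
 * Note: closing is evidently a two-phase affair: a first call with
 * 'warn' set seals the current segment and tells the worker thread to
 * stop, a second call without it joins the thread, which persists the
 * final segment list on its way out (see smp_thread() above).
 */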

/*--------------------------------------------------------------------
 * Allocate a bite.
 *
 * Allocate [min_size...max_size] space from the bottom of the segment,
 * as is convenient.
 *
 * If 'so' + 'idx' is given, also allocate a smp_object from the top
 * of the segment.
 *
 * Return the segment in 'ssg' if given.
 */
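
/*
 * Note: sketch of the in-segment layout implied by the code below;
 * storage bites grow upwards from next_bot while the fixed-size
 * smp_object index entries grow downwards from next_top, and the
 * segment is full when the two meet:
 *
 *	[ bites -> next_bot ....free.... next_top <- smp_objects ]
 */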

static struct storage *
smp_allocx(const struct stevedore *st, size_t min_size, size_t max_size,
    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
{
	struct smp_sc *sc;
	struct storage *ss;
	struct smp_seg *sg;
	uint64_t left, extra;

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
	assert(min_size <= max_size);

	max_size = IRNUP(sc, max_size);
	min_size = IRNUP(sc, min_size);

	extra = IRNUP(sc, sizeof(*ss));
	if (so != NULL) {
		extra += sizeof(**so);
		AN(idx);
	}

	Lck_Lock(&sc->mtx);
	sg = NULL;
	ss = NULL;

	left = 0;
	if (sc->cur_seg != NULL)
		left = smp_spaceleft(sc, sc->cur_seg);
	if (left < extra + min_size) {
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		smp_new_seg(sc);
		if (sc->cur_seg != NULL)
			left = smp_spaceleft(sc, sc->cur_seg);
		else
			left = 0;
	}

	if (left >= extra + min_size) {
		AN(sc->cur_seg);
		if (left < extra + max_size)
			max_size = IRNDN(sc, left - extra);

		sg = sc->cur_seg;
		ss = (void*)(sc->base + sc->next_bot);
		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
		sg->nalloc++;
		if (so != NULL) {
			sc->next_top -= sizeof(**so);
			*so = (void*)(sc->base + sc->next_top);
			/* Render this smp_object mostly harmless */
			EXP_ZERO((*so));
			(*so)->ban = 0.;
			(*so)->ptr = 0;
			sg->objs = *so;
			*idx = ++sg->p.lobjlist;
		}
		(void)smp_spaceleft(sc, sg);	/* for the assert */
	}
	Lck_Unlock(&sc->mtx);

	if (ss == NULL)
		return (ss);
	AN(sg);
	assert(max_size >= min_size);

	/* Fill the storage structure */
	INIT_OBJ(ss, STORAGE_MAGIC);
	ss->ptr = PRNUP(sc, ss + 1);
	ss->space = max_size;
	ss->priv = sc->base;
	if (ssg != NULL)
		*ssg = sg;
	return (ss);
}
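
/*
 * Note: on success the bite may be smaller than the max_size asked for
 * (it is clipped to what is left in the segment), but never smaller
 * than min_size; callers must check st->space rather than assume
 * max_size was granted.
 */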

/*--------------------------------------------------------------------
 * Allocate an object
 */

static int v_matchproto_(storage_allocobj_f)
smp_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st;
	struct smp_sc *sc;
	struct smp_seg *sg;
	struct smp_object *so;
	unsigned objidx;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);

	/* Don't entertain already dead objects */
	if (oc->flags & OC_F_DYING)
		return (0);
	if (oc->t_origin <= 0.)
		return (0);
	if (oc->ttl + oc->grace + oc->keep <= 0.)
		return (0);

	ltot = sizeof(struct object) + PRNDUP(wsl);
	ltot = IRNUP(sc, ltot);

	st = NULL;
	sg = NULL;
	so = NULL;
	objidx = 0;
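
	/*
	 * Note: if the allocation does not fit, LRU_NukeOne() evicts one
	 * object per iteration and the allocation is retried; we give up
	 * once there is nothing left to nuke.
	 */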
	do {
		st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);	// NOP
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	AN(st);
	AN(sg);
	AN(so);
	assert(st->space >= ltot);

	o = SML_MkObject(stv, oc, st->ptr);
	AN(oc->stobj->stevedore);
	assert(oc->stobj->stevedore == stv);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	o->objstore = st;
	st->len = sizeof(*o);

	Lck_Lock(&sc->mtx);
	sg->nfixed++;
	sg->nobj++;

	/* We have to do this somewhere, might as well be here... */
	assert(sizeof so->hash == DIGEST_LEN);
	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
	EXP_COPY(so, oc);
	so->ptr = (uint8_t*)(o->objstore) - sc->base;
	so->ban = BAN_Time(oc->ban);

	smp_init_oc(oc, sg, objidx);

	VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
	Lck_Unlock(&sc->mtx);
	return (1);
}

/*--------------------------------------------------------------------
 * Allocate a bite
 */

static struct storage * v_matchproto_(sml_alloc_f)
smp_alloc(const struct stevedore *st, size_t size)
{

	return (smp_allocx(st,
	    size > 4096 ? 4096 : size, size, NULL, NULL, NULL));
}
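
/*
 * Note: min_size is capped at 4k here, so a large request may be
 * satisfied with as little as one page; presumably the simple-storage
 * layer keeps asking for further bites until the object is complete.
 */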

/*--------------------------------------------------------------------*/

const struct stevedore smp_stevedore = {
	.magic = STEVEDORE_MAGIC,
	.name = "deprecated_persistent",
	.init = smp_mgt_init,
	.open = smp_open,
	.close = smp_close,
	.allocobj = smp_allocobj,
	.baninfo = smp_baninfo,
	.banexport = smp_banexport,
	.methods = &smp_oc_realmethods,

	.sml_alloc = smp_alloc,
	.sml_free = NULL,
	.sml_getobj = smp_sml_getobj,
};

/*--------------------------------------------------------------------
 * Persistence is a bear to test unadulterated, so we cheat by adding
 * a cli command we can use to make it do tricks for us.
 */

static void
debug_report_silo(struct cli *cli, const struct smp_sc *sc)
{
	struct smp_seg *sg;

	VCLI_Out(cli, "Silo: %s (%s)\n",
	    sc->stevedore->ident, sc->filename);
	VTAILQ_FOREACH(sg, &sc->segments, list) {
		VCLI_Out(cli, "  Seg: [0x%jx ... +0x%jx]\n",
		    (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
		if (sg == sc->cur_seg)
			VCLI_Out(cli,
			    "    Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
			    (uintmax_t)(sc->next_bot),
			    (uintmax_t)(sc->next_top),
			    (uintmax_t)(sc->next_top - sc->next_bot));
		VCLI_Out(cli, "    %u nobj, %u alloc, %u lobjlist, %u fixed\n",
		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
	}
}

static void v_matchproto_(cli_func_t)
debug_persistent(struct cli *cli, const char * const * av, void *priv)
{
	struct smp_sc *sc;

	(void)priv;

	if (av[2] == NULL) {
		VTAILQ_FOREACH(sc, &silos, list)
			debug_report_silo(cli, sc);
		return;
	}
	VTAILQ_FOREACH(sc, &silos, list)
		if (!strcmp(av[2], sc->stevedore->ident))
			break;
	if (sc == NULL) {
		VCLI_Out(cli, "Silo <%s> not found\n", av[2]);
		VCLI_SetResult(cli, CLIS_PARAM);
		return;
	}
	if (av[3] == NULL) {
		debug_report_silo(cli, sc);
		return;
	}
	Lck_Lock(&sc->mtx);
	if (!strcmp(av[3], "sync")) {
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		smp_new_seg(sc);
	} else if (!strcmp(av[3], "dump")) {
		debug_report_silo(cli, sc);
	} else {
		VCLI_Out(cli, "Unknown operation\n");
		VCLI_SetResult(cli, CLIS_PARAM);
	}
	Lck_Unlock(&sc->mtx);
}

static struct cli_proto debug_cmds[] = {
	{ CLICMD_DEBUG_PERSISTENT, "d", debug_persistent },
	{ NULL }
};
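
/*
 * Note: usage implied by the handler above (the command name itself is
 * defined by CLICMD_DEBUG_PERSISTENT):
 *
 *	debug.persistent			report all silos
 *	debug.persistent <stevedore>		report one silo
 *	debug.persistent <stevedore> sync	seal segment, open a new one
 *	debug.persistent <stevedore> dump	report that silo
 */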

/*--------------------------------------------------------------------
 */

static void
smp_init(void)
{
	lck_smp = Lck_CreateClass(NULL, "smp");
	CLI_AddFuncs(debug_cmds);
	smp_oc_realmethods.objfree = SML_methods.objfree;
	smp_oc_realmethods.objiterator = SML_methods.objiterator;
	smp_oc_realmethods.objgetspace = SML_methods.objgetspace;
	smp_oc_realmethods.objextend = SML_methods.objextend;
	smp_oc_realmethods.objbocdone = SML_methods.objbocdone;
	smp_oc_realmethods.objgetattr = SML_methods.objgetattr;
	smp_oc_realmethods.objsetattr = SML_methods.objsetattr;
	smp_oc_realmethods.objtouch = LRU_Touch;
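	/* Note: the objfree taken from SML_methods above is deliberately
	 * replaced with the persistent-specific variant. */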
	smp_oc_realmethods.objfree = smp_oc_objfree;
}

/*--------------------------------------------------------------------
 * Pause until all silos have loaded.
 */

void
SMP_Ready(void)
{
	struct smp_sc *sc;

	ASSERT_CLI();
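
	/* Poll once a second until every silo's worker thread reports
	 * that its objects are loaded. */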
	do {
		VTAILQ_FOREACH(sc, &silos, list)
			if (!(sc->flags & SMP_SC_LOADED))
				break;
		if (sc != NULL)
			(void)sleep(1);
	} while (sc != NULL);
}