varnish-cache/bin/varnishd/storage/storage_persistent.c
0
/*-
1
 * Copyright (c) 2008-2011 Varnish Software AS
2
 * All rights reserved.
3
 *
4
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
5
 *
6
 * SPDX-License-Identifier: BSD-2-Clause
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * Persistent storage method
30
 *
31
 * XXX: Before we start the client or maybe after it stops, we should give the
32
 * XXX: stevedores a chance to examine their storage for consistency.
33
 *
34
 * XXX: Do we ever free the LRU-lists ?
35
 */
36
37
#include "config.h"
38
39
#include "cache/cache_varnishd.h"
40
41
#include <sys/mman.h>
42
43
#include <stdio.h>
44
#include <stdlib.h>
45
46
#include "cache/cache_obj.h"
47
#include "cache/cache_objhead.h"
48
#include "storage/storage.h"
49
#include "storage/storage_simple.h"
50
51
#include "vcli_serve.h"
52
#include "vsha256.h"
53
#include "vtim.h"
54
55
#include "storage/storage_persistent.h"
56
57
static struct obj_methods smp_oc_realmethods;
58
59
static struct VSC_lck *lck_smp;
60
61
static void smp_init(void);
62
63
/*--------------------------------------------------------------------*/
64
65
/*
66
 * silos is unlocked, it only changes during startup when we are
67
 * single-threaded
68
 */
69
static VTAILQ_HEAD(,smp_sc)     silos = VTAILQ_HEAD_INITIALIZER(silos);
70
71
/*--------------------------------------------------------------------
72
 * Add bans to silos
73
 */
74
75
/*
 * Append one serialized ban to a silo ban signspace.
 *
 * Returns -1 if the signspace does not have room for 'len' more
 * bytes, 0 on success.
 */
static int
smp_appendban(const struct smp_sc *sc, struct smp_signspace *spc,
    uint32_t len, const uint8_t *ban)
{

	(void)sc;
	if (len > SIGNSPACE_FREE(spc))
		return (-1);

	/* Copy the ban in at the current front and advance the space */
	memcpy(SIGNSPACE_FRONT(spc), ban, len);
	smp_append_signspace(spc, len);

	return (0);
}
89
90
/* Trust that cache_ban.c takes care of locking */
91
92
static int
93 1725
smp_baninfo(const struct stevedore *stv, enum baninfo event,
94
            const uint8_t *ban, unsigned len)
95
{
96
        struct smp_sc *sc;
97 1725
        int r = 0;
98
99 1725
        CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
100
101 1725
        switch (event) {
102
        case BI_NEW:
103 1200
                r |= smp_appendban(sc, &sc->ban1, len, ban);
104 1200
                r |= smp_appendban(sc, &sc->ban2, len, ban);
105 1200
                break;
106
        default:
107
                /* Ignored */
108 525
                break;
109
        }
110
111 1725
        return (r);
112
}
113
114
static void
115 3750
smp_banexport_spc(struct smp_signspace *spc, const uint8_t *bans, unsigned len)
116
{
117 3750
        smp_reset_signspace(spc);
118 3750
        assert(SIGNSPACE_FREE(spc) >= len);
119 3750
        memcpy(SIGNSPACE_DATA(spc), bans, len);
120 3750
        smp_append_signspace(spc, len);
121 3750
        smp_sync_sign(&spc->ctx);
122 3750
}
123
124
static void
125 1875
smp_banexport(const struct stevedore *stv, const uint8_t *bans, unsigned len)
126
{
127
        struct smp_sc *sc;
128
129 1875
        CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
130 1875
        smp_banexport_spc(&sc->ban1, bans, len);
131 1875
        smp_banexport_spc(&sc->ban2, bans, len);
132 1875
}
133
134
/*--------------------------------------------------------------------
135
 * Attempt to open and read in a ban list
136
 */
137
138
static int
smp_open_bans(const struct smp_sc *sc, struct smp_signspace *spc)
{
	uint8_t *bstart, *bend;
	int rv;

	ASSERT_CLI();
	(void)sc;

	/* Verify the signature before trusting the contents */
	rv = smp_chk_signspace(spc);
	if (rv)
		return (rv);

	/* Hand the raw ban bytes to the ban subsystem */
	bstart = SIGNSPACE_DATA(spc);
	bend = SIGNSPACE_FRONT(spc);
	BAN_Reload(bstart, bend - bstart);

	return (0);
}
156
157
/*--------------------------------------------------------------------
158
 * Attempt to open and read in a segment list
159
 */
160
161
/*
 * Read one copy of the segment list, rebuild the in-core segment
 * list, and locate (or create, by dropping old segments) the free
 * reserve.
 *
 * Returns non-zero if the signspace fails its integrity check, in
 * which case the caller should try the other copy.
 */
static int
smp_open_segs(struct smp_sc *sc, struct smp_signspace *spc)
{
	uint64_t length, l;
	struct smp_segptr *ss, *se;
	struct smp_seg *sg, *sg1, *sg2;
	int i, n = 0;

	ASSERT_CLI();
	i = smp_chk_signspace(spc);
	if (i)
		return (i);

	ss = SIGNSPACE_DATA(spc);
	length = SIGNSPACE_LEN(spc);

	if (length == 0) {
		/* No segments: all space from the fixed structures on is free */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		return (0);
	}
	/* ss = oldest segment pointer, se = newest */
	se = ss + length / sizeof *ss;
	se--;
	assert(ss <= se);

	/*
	 * Locate the free reserve, there are only two basic cases,
	 * but once we start dropping segments, things gets more complicated.
	 */

	sc->free_offset = se->offset + se->length;
	l = sc->mediasize - sc->free_offset;
	if (se->offset > ss->offset && l >= sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * Plenty of space at tail, do nothing.
		 */
	} else if (ss->offset > se->offset) {
		/*
		 * [zzzz____xxxxyyyy_]
		 * (make) space between ends
		 * We might nuke the entire tail end without getting
		 * enough space, in which case we fall through to the
		 * last check.
		 */
		while (ss < se && ss->offset > se->offset) {
			l = ss->offset - (se->offset + se->length);
			if (l > sc->free_reserve)
				break;
			/* Drop the oldest remaining segment and retry */
			ss++;
			n++;
		}
	}

	if (l < sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * (make) space at front
		 */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		while (ss < se) {
			l = ss->offset - sc->free_offset;
			if (l > sc->free_reserve)
				break;
			ss++;
			n++;
		}
	}

	/* If even dropping segments could not make room, we cannot go on */
	assert(l >= sc->free_reserve);


	/* Build in-core segments for every segptr we decided to keep */
	sg1 = NULL;	/* first kept segment */
	sg2 = NULL;	/* previously built segment */
	for (; ss <= se; ss++) {
		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
		AN(sg);
		VTAILQ_INIT(&sg->objcores);
		sg->p = *ss;

		sg->flags |= SMP_SEG_MUSTLOAD;

		/*
		 * HACK: prevent save_segs from nuking segment until we have
		 * HACK: loaded it.
		 */
		sg->nobj = 1;
		if (sg1 != NULL) {
			/* Must not overlap the first kept segment */
			assert(sg1->p.offset != sg->p.offset);
			if (sg1->p.offset < sg->p.offset)
				assert(smp_segend(sg1) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg1->p.offset);
		}
		if (sg2 != NULL) {
			/* ...nor the previous one */
			assert(sg2->p.offset != sg->p.offset);
			if (sg2->p.offset < sg->p.offset)
				assert(smp_segend(sg2) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg2->p.offset);
		}

		/* XXX: check that they are inside silo */
		/* XXX: check that they don't overlap */
		/* XXX: check that they are serial */
		sg->sc = sc;
		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
		sg2 = sg;
		if (sg1 == NULL)
			sg1 = sg;
	}
	printf("Dropped %d segments to make free_reserve\n", n);
	return (0);
}
275
276
/*--------------------------------------------------------------------
277
 * Silo worker thread
278
 */
279
280
/*
 * Per-silo background thread: first loads every segment's objects,
 * then loops doing segment-list housekeeping until SMP_SC_STOP is
 * set by smp_close().
 */
static void * v_matchproto_(bgthread_t)
smp_thread(struct worker *wrk, void *priv)
{
	struct smp_sc	*sc;
	struct smp_seg *sg;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
	sc->thread = pthread_self();

	/* First, load all the objects from all segments */
	VTAILQ_FOREACH(sg, &sc->segments, list)
		if (sg->flags & SMP_SEG_MUSTLOAD)
			smp_load_seg(wrk, sc, sg);

	sc->flags |= SMP_SC_LOADED;
	/* Release the ban-list reference taken in smp_open() */
	BAN_Release();
	printf("Silo completely loaded\n");

	/* Housekeeping loop */
	Lck_Lock(&sc->mtx);
	while (!(sc->flags & SMP_SC_STOP)) {
		/*
		 * If the oldest segment has become empty (and is not the
		 * one being written), rewrite the segment list to drop it.
		 */
		sg = VTAILQ_FIRST(&sc->segments);
		if (sg != NULL && sg != sc->cur_seg && sg->nobj == 0)
			smp_save_segs(sc);

		/* Sleep ~1.14s between passes, without holding the lock */
		Lck_Unlock(&sc->mtx);
		VTIM_sleep(3.14159265359 - 2);
		Lck_Lock(&sc->mtx);
	}

	/* Final save on shutdown */
	smp_save_segs(sc);

	Lck_Unlock(&sc->mtx);
	pthread_exit(0);

	NEEDLESS(return (NULL));
}
318
319
/*--------------------------------------------------------------------
320
 * Open a silo in the worker process
321
 */
322
323
/*
 * Open a silo in the worker process: validate it, recover ban and
 * segment lists, then start the background thread which loads the
 * objects.
 */
static void v_matchproto_(storage_open_f)
smp_open(struct stevedore *st)
{
	struct smp_sc	*sc;

	ASSERT_CLI();

	/* One-time setup (lock class, CLI commands, method table) */
	if (VTAILQ_EMPTY(&silos))
		smp_init();

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);

	Lck_New(&sc->mtx, lck_smp);
	Lck_Lock(&sc->mtx);

	sc->stevedore = st;

	/* We trust the parent to give us a valid silo, for good measure: */
	AZ(smp_valid_silo(sc));

	/* Make the first 4096 bytes of the silo read-only */
	AZ(mprotect((void*)sc->base, 4096, PROT_READ));

	sc->ident = SIGN_DATA(&sc->idn);

	/* Check ban lists */
	if (smp_chk_signspace(&sc->ban1)) {
		/* Ban list 1 is broken, use ban2 */
		AZ(smp_chk_signspace(&sc->ban2));
		smp_copy_signspace(&sc->ban1, &sc->ban2);
		smp_sync_sign(&sc->ban1.ctx);
	} else {
		/* Ban1 is OK, copy to ban2 for consistency */
		smp_copy_signspace(&sc->ban2, &sc->ban1);
		smp_sync_sign(&sc->ban2.ctx);
	}
	AZ(smp_open_bans(sc, &sc->ban1));

	/* We attempt seg1 first, and if that fails, try seg2 */
	if (smp_open_segs(sc, &sc->seg1))
		AZ(smp_open_segs(sc, &sc->seg2));

	/*
	 * Grab a reference to the tail of the ban list, until the thread
	 * has loaded all objects, so we can be sure that all of our
	 * proto-bans survive until then.
	 */
	BAN_Hold();

	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */

	/* XXX: abandon early segments to make sure we have free space ? */

	(void)ObjSubscribeEvents(smp_oc_event, st,
	    OEV_BANCHG|OEV_TTLCHG|OEV_INSERT);

	/* Open a new segment, so we are ready to write */
	smp_new_seg(sc);

	/* Start the silo worker thread, it will load the objects */
	WRK_BgThread(&sc->bgthread, "persistence", smp_thread, sc);

	VTAILQ_INSERT_TAIL(&silos, sc, list);
	Lck_Unlock(&sc->mtx);
}
387
388
/*--------------------------------------------------------------------
389
 * Close a silo
390
 */
391
392
static void v_matchproto_(storage_close_f)
393 1850
smp_close(const struct stevedore *st, int warn)
394
{
395
        struct smp_sc   *sc;
396
        void *status;
397
398 1850
        ASSERT_CLI();
399
400 1850
        CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
401 1850
        if (warn) {
402 925
                Lck_Lock(&sc->mtx);
403 925
                if (sc->cur_seg != NULL)
404 925
                        smp_close_seg(sc, sc->cur_seg);
405 925
                AZ(sc->cur_seg);
406 925
                sc->flags |= SMP_SC_STOP;
407 925
                Lck_Unlock(&sc->mtx);
408 925
        } else {
409 925
                PTOK(pthread_join(sc->bgthread, &status));
410 925
                AZ(status);
411
        }
412 1850
}
413
414
/*--------------------------------------------------------------------
415
 * Allocate a bite.
416
 *
417
 * Allocate [min_size...max_size] space from the bottom of the segment,
418
 * as is convenient.
419
 *
420
 * If 'so' + 'idx' is given, also allocate a smp_object from the top
421
 * of the segment.
422
 *
423
 * Return the segment in 'ssg' if given.
424
 */
425
426
static struct storage *
smp_allocx(const struct stevedore *st, size_t min_size, size_t max_size,
    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
{
	struct smp_sc *sc;
	struct storage *ss;
	struct smp_seg *sg;
	uint64_t left, extra;

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
	assert(min_size <= max_size);

	/* Round both bounds to the silo's allocation granularity */
	max_size = IRNUP(sc, max_size);
	min_size = IRNUP(sc, min_size);

	/* Overhead: the struct storage, plus an smp_object if requested */
	extra = IRNUP(sc, sizeof(*ss));
	if (so != NULL) {
		extra += sizeof(**so);
		AN(idx);
	}

	Lck_Lock(&sc->mtx);
	sg = NULL;
	ss = NULL;

	left = 0;
	if (sc->cur_seg != NULL)
		left = smp_spaceleft(sc, sc->cur_seg);
	if (left < extra + min_size) {
		/* Current segment cannot fit even the minimum: roll over */
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		smp_new_seg(sc);
		if (sc->cur_seg != NULL)
			left = smp_spaceleft(sc, sc->cur_seg);
		else
			left = 0;
	}

	if (left >= extra + min_size)  {
		AN(sc->cur_seg);
		if (left < extra + max_size)
			/* Trim the request down to what actually fits */
			max_size = IRNDN(sc, left - extra);

		sg = sc->cur_seg;
		/* Data space grows up from the bottom of the segment */
		ss = (void*)(sc->base + sc->next_bot);
		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
		sg->nalloc++;
		if (so != NULL) {
			/* smp_objects grow down from the top */
			sc->next_top -= sizeof(**so);
			*so = (void*)(sc->base + sc->next_top);
			/* Render this smp_object mostly harmless */
			EXP_ZERO((*so));
			(*so)->ban = 0.;
			(*so)->ptr = 0;
			sg->objs = *so;
			*idx = ++sg->p.lobjlist;
		}
		(void)smp_spaceleft(sc, sg);	/* for the assert */
	}
	Lck_Unlock(&sc->mtx);

	if (ss == NULL)
		return (ss);
	AN(sg);
	assert(max_size >= min_size);

	/* Fill the storage structure */
	INIT_OBJ(ss, STORAGE_MAGIC);
	ss->ptr = PRNUP(sc, ss + 1);
	ss->space = max_size;
	ss->priv = sc->base;
	if (ssg != NULL)
		*ssg = sg;
	return (ss);
}
501
502
/*--------------------------------------------------------------------
503
 * Allocate an object
504
 */
505
506
/*
 * Allocate and initialize a persistent object: object body from the
 * bottom of the segment, its smp_object slot from the top.
 *
 * Returns 1 on success, 0 when the object should not (dying/expired)
 * or cannot (no space even after LRU nuking) be stored.
 */
static int v_matchproto_(storage_allocobj_f)
smp_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st;
	struct smp_sc	*sc;
	struct smp_seg *sg;
	struct smp_object *so;
	unsigned objidx;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);

	/* Don't entertain already dead objects */
	if (oc->flags & OC_F_DYING)
		return (0);
	if (oc->t_origin <= 0.)
		return (0);
	if (oc->ttl + oc->grace + oc->keep <= 0.)
		return (0);

	/* Total need: object struct plus workspace, rounded for the silo */
	ltot = sizeof(struct object) + PRNDUP(wsl);
	ltot = IRNUP(sc, ltot);

	st = NULL;
	sg = NULL;
	so = NULL;
	objidx = 0;

	/* Keep nuking LRU objects until the allocation succeeds */
	do {
		st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);		// NOP
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	AN(st);
	AN(sg);
	AN(so);
	assert(st->space >= ltot);

	o = SML_MkObject(stv, oc, st->ptr);
	AN(oc->stobj->stevedore);
	assert(oc->stobj->stevedore == stv);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	o->objstore = st;
	st->len = sizeof(*o);

	Lck_Lock(&sc->mtx);
	sg->nfixed++;
	sg->nobj++;

	/* We have to do this somewhere, might as well be here... */
	assert(sizeof so->hash == DIGEST_LEN);
	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
	EXP_COPY(so, oc);
	/* Store the object's position as an offset from the silo base */
	so->ptr = (uint8_t*)(o->objstore) - sc->base;
	so->ban = BAN_Time(oc->ban);

	smp_init_oc(oc, sg, objidx);

	VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
	Lck_Unlock(&sc->mtx);
	return (1);
}
577
578
/*--------------------------------------------------------------------
579
 * Allocate a bite
580
 */
581
582
static struct storage * v_matchproto_(sml_alloc_f)
583 150
smp_alloc(const struct stevedore *st, size_t size)
584
{
585
586 300
        return (smp_allocx(st,
587 150
            size > 4096 ? 4096 : size, size, NULL, NULL, NULL));
588
}
589
590
/*--------------------------------------------------------------------*/
591
592
/* Method table for the (deprecated) persistent stevedore */
const struct stevedore smp_stevedore = {
	.magic		= STEVEDORE_MAGIC,
	.name		= "deprecated_persistent",
	.init		= smp_mgt_init,
	.open		= smp_open,
	.close		= smp_close,
	.allocobj	= smp_allocobj,
	.baninfo	= smp_baninfo,
	.banexport	= smp_banexport,
	/* Object methods are filled in at runtime by smp_init() */
	.methods	= &smp_oc_realmethods,

	/* Reuses the "simple" storage helpers for plain allocations */
	.sml_alloc	= smp_alloc,
	.sml_free	= NULL,
	.sml_getobj	= smp_sml_getobj,
};
607
608
/*--------------------------------------------------------------------
609
 * Persistence is a bear to test unadulterated, so we cheat by adding
610
 * a cli command we can use to make it do tricks for us.
611
 */
612
613
/* Print a silo's segments and allocation state to the CLI */
static void
debug_report_silo(struct cli *cli, const struct smp_sc *sc)
{
	struct smp_seg *sg;

	VCLI_Out(cli, "Silo: %s (%s)\n",
	    sc->stevedore->ident, sc->filename);
	VTAILQ_FOREACH(sg, &sc->segments, list) {
		VCLI_Out(cli, "  Seg: [0x%jx ... +0x%jx]\n",
		   (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
		/* Only the currently open segment has live alloc pointers */
		if (sg == sc->cur_seg)
			VCLI_Out(cli,
			   "    Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
			   (uintmax_t)(sc->next_bot),
			   (uintmax_t)(sc->next_top),
			   (uintmax_t)(sc->next_top - sc->next_bot));
		VCLI_Out(cli, "    %u nobj, %u alloc, %u lobjlist, %u fixed\n",
		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
	}
}
633
634
static void v_matchproto_(cli_func_t)
635 150
debug_persistent(struct cli *cli, const char * const * av, void *priv)
636
{
637
        struct smp_sc *sc;
638
639 150
        (void)priv;
640
641 150
        if (av[2] == NULL) {
642 0
                VTAILQ_FOREACH(sc, &silos, list)
643 0
                        debug_report_silo(cli, sc);
644 0
                return;
645
        }
646 150
        VTAILQ_FOREACH(sc, &silos, list)
647 150
                if (!strcmp(av[2], sc->stevedore->ident))
648 150
                        break;
649 150
        if (sc == NULL) {
650 0
                VCLI_Out(cli, "Silo <%s> not found\n", av[2]);
651 0
                VCLI_SetResult(cli, CLIS_PARAM);
652 0
                return;
653
        }
654 150
        if (av[3] == NULL) {
655 0
                debug_report_silo(cli, sc);
656 0
                return;
657
        }
658 150
        Lck_Lock(&sc->mtx);
659 150
        if (!strcmp(av[3], "sync")) {
660 75
                if (sc->cur_seg != NULL)
661 75
                        smp_close_seg(sc, sc->cur_seg);
662 75
                smp_new_seg(sc);
663 150
        } else if (!strcmp(av[3], "dump")) {
664 75
                debug_report_silo(cli, sc);
665 75
        } else {
666 0
                VCLI_Out(cli, "Unknown operation\n");
667 0
                VCLI_SetResult(cli, CLIS_PARAM);
668
        }
669 150
        Lck_Unlock(&sc->mtx);
670 150
}
671
672
/* CLI commands registered by smp_init() for testing persistence */
static struct cli_proto debug_cmds[] = {
	{ CLICMD_DEBUG_PERSISTENT,		"d", debug_persistent },
	{ NULL }
};
676
677
/*--------------------------------------------------------------------
678
 */
679
680
static void
681 725
smp_init(void)
682
{
683 725
        lck_smp = Lck_CreateClass(NULL, "smp");
684 725
        CLI_AddFuncs(debug_cmds);
685 725
        smp_oc_realmethods.objfree = SML_methods.objfree;
686 725
        smp_oc_realmethods.objiterator = SML_methods.objiterator;
687 725
        smp_oc_realmethods.objgetspace = SML_methods.objgetspace;
688 725
        smp_oc_realmethods.objextend = SML_methods.objextend;
689 725
        smp_oc_realmethods.objbocdone = SML_methods.objbocdone;
690 725
        smp_oc_realmethods.objgetattr = SML_methods.objgetattr;
691 725
        smp_oc_realmethods.objsetattr = SML_methods.objsetattr;
692 725
        smp_oc_realmethods.objtouch = LRU_Touch;
693 725
        smp_oc_realmethods.objfree = smp_oc_objfree;
694 725
}
695
696
/*--------------------------------------------------------------------
697
 * Pause until all silos have loaded.
698
 */
699
700
void
701 600
SMP_Ready(void)
702
{
703
        struct smp_sc *sc;
704
705 600
        ASSERT_CLI();
706 600
        do {
707 1425
                VTAILQ_FOREACH(sc, &silos, list)
708 825
                        if (!(sc->flags & SMP_SC_LOADED))
709 0
                                break;
710 600
                if (sc != NULL)
711 0
                        (void)sleep(1);
712 600
        } while (sc != NULL);
713 600
}