varnish-cache/bin/varnishd/storage/storage_persistent_silo.c
/*-
 * Copyright (c) 2008-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Persistent storage method
 *
 * XXX: Before we start the client or maybe after it stops, we should give the
 * XXX: stevedores a chance to examine their storage for consistency.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache/cache_varnishd.h"

#include "vsha256.h"
#include "vend.h"
#include "vtim.h"

#include "cache/cache_objhead.h"

#include "storage/storage.h"
#include "storage/storage_simple.h"
#include "storage/storage_persistent.h"

/*
 * We use the top bit to mark objects still needing fixup
 * In theory this may need to be platform dependent
 */

#define NEED_FIXUP      (1U << 31)
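
/*
 * The NEED_FIXUP bit lives in oc->stobj->priv2, next to the 1-based
 * index of the object's struct smp_object in its segment (see
 * smp_init_oc() and smp_find_so() below).  Objects resurrected from
 * disk keep the bit until smp_sml_getobj() has validated their
 * storage; until then they are counted as n_vampireobject rather
 * than n_object.
 */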

/*--------------------------------------------------------------------
 * Write the segment list back to the silo.
 *
 * We write the first copy, sync it synchronously, then write the
 * second copy and sync it synchronously.
 *
 * Provided the kernel doesn't lie, that means we will always have
 * at least one valid copy in the silo.
 */

static void
smp_save_seg(const struct smp_sc *sc, struct smp_signspace *spc)
{
        struct smp_segptr *ss;
        struct smp_seg *sg;
        uint64_t length;

        Lck_AssertHeld(&sc->mtx);
        smp_reset_signspace(spc);
        ss = SIGNSPACE_DATA(spc);
        length = 0;
        VTAILQ_FOREACH(sg, &sc->segments, list) {
                assert(sg->p.offset < sc->mediasize);
                assert(sg->p.offset + sg->p.length <= sc->mediasize);
                *ss = sg->p;
                ss++;
                length += sizeof *ss;
        }
        smp_append_signspace(spc, length);
        smp_sync_sign(&spc->ctx);
}

void
smp_save_segs(struct smp_sc *sc)
{
        struct smp_seg *sg, *sg2;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        /*
         * Remove empty segments from the front of the list
         * before we write the segments to disk.
         */
        VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
                CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);

                if (sg->nobj > 0)
                        break;
                if (sg == sc->cur_seg)
                        continue;
                VTAILQ_REMOVE(&sc->segments, sg, list);
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
        }
        smp_save_seg(sc, &sc->seg1);
        smp_save_seg(sc, &sc->seg2);
}

/*--------------------------------------------------------------------
 * Load segments
 *
 * The overall objective is to register the existence of an object, based
 * only on the minimally sized struct smp_object, without causing the
 * main object to be faulted in.
 *
 * XXX: We can test this by mprotecting the main body of the segment
 * XXX: until the first fixup happens, or even just over this loop.
 * XXX: However: this requires that the smp_objects start further
 * XXX: into the segment than a page so that they do not get hit
 * XXX: by the protection.
 */

void
smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
    struct smp_seg *sg)
{
        struct smp_object *so;
        struct objcore *oc;
        struct ban *ban;
        uint32_t no;
        double t_now = VTIM_real();
        struct smp_signctx ctx[1];

        ASSERT_SILO_THREAD(sc);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg->flags & SMP_SEG_MUSTLOAD);
        sg->flags &= ~SMP_SEG_MUSTLOAD;
        AN(sg->p.offset);
        if (sg->p.objlist == 0)
                return;
        smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
        if (smp_chk_sign(ctx))
                return;

        /* test SEGTAIL */
        /* test OBJIDX */
        so = (void*)(sc->base + sg->p.objlist);
        sg->objs = so;
        no = sg->p.lobjlist;
        /* Clear the bogus "hold" count */
        sg->nobj = 0;
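        /*
         * Resurrect each surviving object as a "vampire": one objcore
         * per struct smp_object, inserted into the hash with NEED_FIXUP
         * set, without touching the object body itself.
         */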
        for (; no > 0; so++, no--) {
                if (EXP_WHEN(so) < t_now)
                        continue;
                ban = BAN_FindBan(so->ban);
                AN(ban);
                oc = ObjNew(wrk);
                oc->stobj->stevedore = sc->parent;
                smp_init_oc(oc, sg, no);
                VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
                oc->stobj->priv2 |= NEED_FIXUP;
                EXP_COPY(oc, so);
                sg->nobj++;
                oc->refcnt++;
                HSH_Insert(wrk, so->hash, oc, ban);
                AN(oc->ban);
                HSH_DerefBoc(wrk, oc);  // XXX Keep it an stream resurrection?
                (void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
                wrk->stats->n_vampireobject++;
        }
        Pool_Sumstat(wrk);
        sg->flags |= SMP_SEG_LOADED;
}

/*--------------------------------------------------------------------
 * Create a new segment
 */

void
smp_new_seg(struct smp_sc *sc)
{
        struct smp_seg tmpsg;
        struct smp_seg *sg;

        AZ(sc->cur_seg);
        Lck_AssertHeld(&sc->mtx);

        /* XXX: find where it goes in silo */

        INIT_OBJ(&tmpsg, SMP_SEG_MAGIC);
        tmpsg.sc = sc;
        tmpsg.p.offset = sc->free_offset;
        /* XXX: align */
        assert(tmpsg.p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
        assert(tmpsg.p.offset < sc->mediasize);

        tmpsg.p.length = sc->aim_segl;
        tmpsg.p.length = RDN2(tmpsg.p.length, 8);

        if (smp_segend(&tmpsg) > sc->mediasize)
                /* XXX: Consider truncation in this case */
                tmpsg.p.offset = sc->ident->stuff[SMP_SPC_STUFF];

        assert(smp_segend(&tmpsg) <= sc->mediasize);

        sg = VTAILQ_FIRST(&sc->segments);
        if (sg != NULL && tmpsg.p.offset <= sg->p.offset) {
                if (smp_segend(&tmpsg) > sg->p.offset)
                        /* No more space, return (cur_seg will be NULL) */
                        /* XXX: Consider truncation instead of failing */
                        return;
                assert(smp_segend(&tmpsg) <= sg->p.offset);
        }

        if (tmpsg.p.offset == sc->ident->stuff[SMP_SPC_STUFF])
                printf("Wrapped silo\n");

        ALLOC_OBJ(sg, SMP_SEG_MAGIC);
        if (sg == NULL)
                return;
        *sg = tmpsg;
        VTAILQ_INIT(&sg->objcores);

        sg->p.offset = IRNUP(sc, sg->p.offset);
        sg->p.length -= sg->p.offset - tmpsg.p.offset;
        sg->p.length = IRNDN(sc, sg->p.length);
        assert(sg->p.offset + sg->p.length <= tmpsg.p.offset + tmpsg.p.length);
        sc->free_offset = sg->p.offset + sg->p.length;

        VTAILQ_INSERT_TAIL(&sc->segments, sg, list);

        /* Neuter the new segment in case there is an old one there */
        AN(sg->p.offset);
        smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);
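
        /*
         * The segment fills from both ends: storage allocations are
         * carved upwards from next_bot, while struct smp_object slots
         * are taken downwards from next_top (the allocator lives in
         * storage_persistent.c).
         */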
        /* Set up our allocation points */
        sc->cur_seg = sg;
        sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
        sc->next_top = smp_segend(sg);
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        IASSERTALIGN(sc, sc->next_bot);
        IASSERTALIGN(sc, sc->next_top);
        sg->objs = (void*)(sc->base + sc->next_top);
}

/*--------------------------------------------------------------------
 * Close a segment
 */

void
smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
{
        uint64_t left, dst, len;
        void *dp;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg == sc->cur_seg);
        AN(sg->p.offset);
        sc->cur_seg = NULL;

        if (sg->nalloc == 0) {
                /* If segment is empty, delete instead */
                VTAILQ_REMOVE(&sc->segments, sg, list);
                assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
                assert(sg->p.offset < sc->mediasize);
                sc->free_offset = sg->p.offset;
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
                return;
        }

        /*
         * If there is enough space left that we can move the smp_objects
         * down without overwriting the present copy, we will do so to
         * compact the segment.
         */
        left = smp_spaceleft(sc, sg);
        len = sizeof(struct smp_object) * sg->p.lobjlist;
        if (len < left) {
                dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
                dp = sc->base + dst;
                assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
                memcpy(dp, sg->objs, len);
                sc->next_top = dst;
                sg->objs = dp;
                sg->p.length = (sc->next_top - sg->p.offset)
                     + len + IRNUP(sc, SMP_SIGN_SPACE);
                (void)smp_spaceleft(sc, sg);    /* for the asserts */

        }
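
        /*
         * The closed segment now gets its final layout: SEGHEAD
         * signature, object bodies, OBJIDX signature, the array of
         * struct smp_object, SEGTAIL signature.
         */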
        /* Update the segment header */
        sg->p.objlist = sc->next_top;

        /* Write the (empty) OBJIDX signature */
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        assert(sc->next_top >= sc->next_bot);
        smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Write the (empty) SEGTAIL signature */
        smp_def_sign(sc, sg->ctx,
            sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Save segment list */
        smp_save_segs(sc);
        sc->free_offset = smp_segend(sg);
}

/*---------------------------------------------------------------------
 */

static struct smp_object *
smp_find_so(const struct smp_seg *sg, unsigned priv2)
{
        struct smp_object *so;

        priv2 &= ~NEED_FIXUP;
        assert(priv2 > 0);
        assert(priv2 <= sg->p.lobjlist);
        so = &sg->objs[sg->p.lobjlist - priv2];
        return (so);
}

/*---------------------------------------------------------------------
 * Check if a given storage structure is valid to use
 */
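
/*
 * Returns zero if the struct storage is usable, otherwise a nonzero
 * code identifying the first problem found (the codes are distinct
 * bits, so callers can OR them together).
 */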

static int
smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
    const struct storage *st)
{
        struct smp_seg *sg2;
        const uint8_t *pst;
        uint64_t o;

        (void)sg;               /* XXX: faster: Start search from here */
        pst = (const void *)st;

        if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
                return (0x01);          /* Before silo payload start */
        if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
                return (0x02);          /* After silo end */

        o = pst - sc->base;

        /* Find which segment contains the storage structure */
        VTAILQ_FOREACH(sg2, &sc->segments, list)
                if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
                        break;
        if (sg2 == NULL)
                return (0x04);          /* No claiming segment */
        if (!(sg2->flags & SMP_SEG_LOADED))
                return (0x08);          /* Claiming segment not loaded */

        /* It is now safe to access the storage structure */
        if (st->magic != STORAGE_MAGIC)
                return (0x10);          /* Not enough magic */

        if (o + st->space >= sg2->p.objlist)
                return (0x20);          /* Allocation not inside segment */

        if (st->len > st->space)
                return (0x40);          /* Plain bad... */

        /*
         * XXX: We could patch up st->stevedore and st->priv here
         * XXX: but if things go right, we will never need them.
         */
        return (0);
}

/*---------------------------------------------------------------------
 * objcore methods for persistent objects
 */

static void
fix_ptr(const struct smp_seg *sg, const struct storage *st, void **ptr)
{
        // See comment where used below
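        // st->priv holds the silo base address that was in effect when
        // these pointers were written; rebase *ptr from that old
        // mapping to the current one at sg->sc->base (NULL stays NULL).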
        uintptr_t u;

        u = (uintptr_t)(*ptr);
        if (u != 0) {
                u -= (uintptr_t)st->priv;
                u += (uintptr_t)sg->sc->base;
        }
        *ptr = (void *)u;
}

struct object * v_matchproto_(sml_getobj_f)
smp_sml_getobj(struct worker *wrk, struct objcore *oc)
{
        struct object *o;
        struct smp_seg *sg;
        struct smp_object *so;
        struct storage *st, *st2;
        uint64_t l;
        int bad;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        AN(oc->stobj->stevedore);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        /**************************************************************
         * The silo may have been remapped at a different address,
         * because the people who came up with ASLR were unable to
         * imagine that there might be beneficial use-cases for
         * always mapping a file at the same specific address.
         *
         * We store the silo's base address in struct storage->priv
         * and manually fix all the pointers in struct object and
         * the list of struct storage objects which hold the body.
         * When done, we update the storage->priv, so we can do the
         * same trick next time.
         *
         * This is a prohibitively expensive workaround, but we can
         * live with it, because the role of this stevedore is only
         * to keep the internal stevedore API honest.
         */

        st = (void*)(sg->sc->base + so->ptr);
        fix_ptr(sg, st, (void**)&st->ptr);

        o = (void*)st->ptr;
        fix_ptr(sg, st, (void**)&o->objstore);
        fix_ptr(sg, st, (void**)&o->va_vary);
        fix_ptr(sg, st, (void**)&o->va_headers);
        fix_ptr(sg, st, (void**)&o->list.vtqh_first);
        fix_ptr(sg, st, (void**)&o->list.vtqh_last);
        st->priv = (void*)(sg->sc->base);

        st2 = o->list.vtqh_first;
        while (st2 != NULL) {
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_next);
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_prev);
                fix_ptr(sg, st2, (void**)&st2->ptr);
                st2->priv = (void*)(sg->sc->base);
                st2 = st2->list.vtqe_next;
        }

        /*
         * The object may not be in this segment, since we allocate it
         * in a separate operation from the smp_object.  We could check
         * that it is in a later segment, but that would be complicated.
         * XXX: For now, be happy if it is inside the silo
         */
        ASSERT_PTR_IN_SILO(sg->sc, o);
        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

        /*
         * If this flag is not set, it never will be, so the lock is
         * not needed to test it.
         */
        if (!(oc->stobj->priv2 & NEED_FIXUP))
                return (o);
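
        /*
         * First access since the silo was loaded: validate the body's
         * storage before trusting it.  Anything suspicious gets its
         * expiry zeroed, so the object will be discarded rather than
         * served.
         */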
        Lck_Lock(&sg->sc->mtx);
        /* Check again, we might have raced. */
        if (oc->stobj->priv2 & NEED_FIXUP) {
                /* We trust caller to have a refcnt for us */

                bad = 0;
                l = 0;
                VTAILQ_FOREACH(st, &o->list, list) {
                        bad |= smp_loaded_st(sg->sc, sg, st);
                        if (bad)
                                break;
                        l += st->len;
                }
                if (l != vbe64dec(o->fa_len))
                        bad |= 0x100;

                if (bad) {
                        EXP_ZERO(oc);
                        EXP_ZERO(so);
                }

                sg->nfixed++;
                wrk->stats->n_object++;
                wrk->stats->n_vampireobject--;
                oc->stobj->priv2 &= ~NEED_FIXUP;
        }
        Lck_Unlock(&sg->sc->mtx);
        return (o);
}

void v_matchproto_(objfree_f)
smp_oc_objfree(struct worker *wrk, struct objcore *oc)
{
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);
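
        /*
         * Zero the on-disk slot's expiry and pointer so the object
         * will not be resurrected if the silo is reloaded.
         */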
        Lck_Lock(&sg->sc->mtx);
        EXP_ZERO(so);
        so->ptr = 0;

        assert(sg->nobj > 0);
        sg->nobj--;
        if (oc->stobj->priv2 & NEED_FIXUP) {
                wrk->stats->n_vampireobject--;
        } else {
                assert(sg->nfixed > 0);
                sg->nfixed--;
                wrk->stats->n_object--;
        }
        VTAILQ_REMOVE(&sg->objcores, oc, lru_list);

        Lck_Unlock(&sg->sc->mtx);
        memset(oc->stobj, 0, sizeof oc->stobj);
}

/*--------------------------------------------------------------------*/

void
smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
{

        AZ(objidx & NEED_FIXUP);
        oc->stobj->priv = sg;
        oc->stobj->priv2 = objidx;
}

/*--------------------------------------------------------------------*/

void v_matchproto_(obj_event_f)
smp_oc_event(struct worker *wrk, void *priv, struct objcore *oc, unsigned ev)
{
        struct stevedore *st;
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(st, priv, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->stobj->stevedore != st)
                return;

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);
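
        /*
         * Mirror ban-time and expiry changes into the on-disk
         * struct smp_object, so they survive a restart.
         */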
        if (sg == sg->sc->cur_seg) {
                /* Lock necessary, we might race close_seg */
                Lck_Lock(&sg->sc->mtx);
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
                Lck_Unlock(&sg->sc->mtx);
        } else {
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
        }
}