varnish-cache/bin/varnishd/storage/storage_persistent_silo.c

/*-
 * Copyright (c) 2008-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Persistent storage method
 *
 * XXX: Before we start the client or maybe after it stops, we should give the
 * XXX: stevedores a chance to examine their storage for consistency.
 *
 */
#include "config.h"
37
38
39
#include <stdio.h>
40
#include <stdlib.h>
41
42
#include "cache/cache_varnishd.h"
43
44
#include "vsha256.h"
45
#include "vend.h"
46
#include "vtim.h"
47
48
#include "cache/cache_objhead.h"
49
50
#include "storage/storage.h"
51
#include "storage/storage_simple.h"
52
#include "storage/storage_persistent.h"
53
54
/*
55
 * We use the top bit to mark objects still needing fixup
56
 * In theory this may need to be platform dependent
57
 */
58
59
#define NEED_FIXUP      (1U << 31)
60
61
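
/*
 * Added commentary, a sketch rather than upstream code: the fixup flag
 * shares oc->stobj->priv2 with the 1-based object index, roughly:
 *
 *      priv2 = objidx;                 (smp_init_oc(), flag clear)
 *      priv2 |= NEED_FIXUP;            (smp_load_seg(), resurrected object)
 *      priv2 &= ~NEED_FIXUP;           (smp_sml_getobj(), after fixup)
 *
 * smp_init_oc() asserts that the index never collides with the flag bit,
 * which is why this may need to be platform dependent.
 */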

/*--------------------------------------------------------------------
 * Write the segmentlist back to the silo.
 *
 * We write the first copy, sync it synchronously, then write the
 * second copy and sync it synchronously.
 *
 * Provided the kernel doesn't lie, that means we will always have
 * at least one valid copy in the silo.
 */

static void
smp_save_seg(const struct smp_sc *sc, struct smp_signspace *spc)
{
        struct smp_segptr *ss;
        struct smp_seg *sg;
        uint64_t length;

        Lck_AssertHeld(&sc->mtx);
        smp_reset_signspace(spc);
        ss = SIGNSPACE_DATA(spc);
        length = 0;
        VTAILQ_FOREACH(sg, &sc->segments, list) {
                assert(sg->p.offset < sc->mediasize);
                assert(sg->p.offset + sg->p.length <= sc->mediasize);
                *ss = sg->p;
                ss++;
                length += sizeof *ss;
        }
        smp_append_signspace(spc, length);
        smp_sync_sign(&spc->ctx);
}

void
smp_save_segs(struct smp_sc *sc)
{
        struct smp_seg *sg, *sg2;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        /*
         * Remove empty segments from the front of the list
         * before we write the segments to disk.
         */
        VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
                CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);

                if (sg->nobj > 0)
                        break;
                if (sg == sc->cur_seg)
                        continue;
                VTAILQ_REMOVE(&sc->segments, sg, list);
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
        }
        smp_save_seg(sc, &sc->seg1);
        smp_save_seg(sc, &sc->seg2);
}
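
/*
 * Added commentary: sc->seg1 and sc->seg2 above are the two redundant
 * copies of the segment list.  Because seg1 is written and synced
 * before seg2 is touched, a crash between the two writes still leaves
 * one intact copy to recover the silo from.
 */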

/*--------------------------------------------------------------------
 * Load segments
 *
 * The overall objective is to register the existence of an object, based
 * only on the minimally sized struct smp_object, without causing the
 * main object to be faulted in.
 *
 * XXX: We can test this by mprotecting the main body of the segment
 * XXX: until the first fixup happens, or even just over this loop.
 * XXX: However, that requires that the smp_objects start further
 * XXX: into the segment than a page, so that they do not get hit
 * XXX: by the protection.
 */

void
smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
    struct smp_seg *sg)
{
        struct smp_object *so;
        struct objcore *oc;
        struct ban *ban;
        uint32_t no;
        double t_now = VTIM_real();
        struct smp_signctx ctx[1];

        ASSERT_SILO_THREAD(sc);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg->flags & SMP_SEG_MUSTLOAD);
        sg->flags &= ~SMP_SEG_MUSTLOAD;
        AN(sg->p.offset);
        if (sg->p.objlist == 0)
                return;
        smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
        if (smp_chk_sign(ctx))
                return;

        /* test SEGTAIL */
        /* test OBJIDX */
        so = (void*)(sc->base + sg->p.objlist);
        sg->objs = so;
        no = sg->p.lobjlist;
        /* Clear the bogus "hold" count */
        sg->nobj = 0;
        for (; no > 0; so++, no--) {
                if (EXP_WHEN(so) < t_now)
                        continue;
                ban = BAN_FindBan(so->ban);
                AN(ban);
                oc = ObjNew(wrk);
                oc->stobj->stevedore = sc->parent;
                smp_init_oc(oc, sg, no);
                VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
                oc->stobj->priv2 |= NEED_FIXUP;
                EXP_COPY(oc, so);
                sg->nobj++;
                oc->refcnt++;
                HSH_Insert(wrk, so->hash, oc, ban);
                AN(oc->ban);
                HSH_DerefBoc(wrk, oc);  // XXX Keep it a stream resurrection?
                (void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
                wrk->stats->n_vampireobject++;
        }
        Pool_Sumstat(wrk);
        sg->flags |= SMP_SEG_LOADED;
}
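
/*
 * Added commentary: every object resurrected above is only registered,
 * not faulted in; it is counted as n_vampireobject and marked
 * NEED_FIXUP until the first smp_sml_getobj() call fixes up its
 * pointers, verifies its storage and promotes it to n_object.
 */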

/*--------------------------------------------------------------------
 * Create a new segment
 */

void
smp_new_seg(struct smp_sc *sc)
{
        struct smp_seg tmpsg;
        struct smp_seg *sg;

        AZ(sc->cur_seg);
        Lck_AssertHeld(&sc->mtx);

        /* XXX: find where it goes in silo */

        INIT_OBJ(&tmpsg, SMP_SEG_MAGIC);
        tmpsg.sc = sc;
        tmpsg.p.offset = sc->free_offset;
        /* XXX: align */
        assert(tmpsg.p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
        assert(tmpsg.p.offset < sc->mediasize);

        tmpsg.p.length = sc->aim_segl;
        tmpsg.p.length = RDN2(tmpsg.p.length, 8);

        if (smp_segend(&tmpsg) > sc->mediasize)
                /* XXX: Consider truncation in this case */
                tmpsg.p.offset = sc->ident->stuff[SMP_SPC_STUFF];

        assert(smp_segend(&tmpsg) <= sc->mediasize);

        sg = VTAILQ_FIRST(&sc->segments);
        if (sg != NULL && tmpsg.p.offset <= sg->p.offset) {
                if (smp_segend(&tmpsg) > sg->p.offset)
                        /* No more space, return (cur_seg will be NULL) */
                        /* XXX: Consider truncation instead of failing */
                        return;
                assert(smp_segend(&tmpsg) <= sg->p.offset);
        }

        if (tmpsg.p.offset == sc->ident->stuff[SMP_SPC_STUFF])
                printf("Wrapped silo\n");

        ALLOC_OBJ(sg, SMP_SEG_MAGIC);
        if (sg == NULL)
                return;
        *sg = tmpsg;
        VTAILQ_INIT(&sg->objcores);

        sg->p.offset = IRNUP(sc, sg->p.offset);
        sg->p.length -= sg->p.offset - tmpsg.p.offset;
        sg->p.length = IRNDN(sc, sg->p.length);
        assert(sg->p.offset + sg->p.length <= tmpsg.p.offset + tmpsg.p.length);
        sc->free_offset = sg->p.offset + sg->p.length;

        VTAILQ_INSERT_TAIL(&sc->segments, sg, list);

        /* Neuter the new segment in case there is an old one there */
        AN(sg->p.offset);
        smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Set up our allocation points */
        sc->cur_seg = sg;
        sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
        sc->next_top = smp_segend(sg);
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        IASSERTALIGN(sc, sc->next_bot);
        IASSERTALIGN(sc, sc->next_top);
        sg->objs = (void*)(sc->base + sc->next_top);
}
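
/*
 * Added commentary, an approximate picture (not to scale) of an open
 * segment as set up above:
 *
 *      p.offset                                           smp_segend()
 *      |SEGHEAD| bodies grow up ->    free    <- smp_objects |SEGTAIL|
 *               ^next_bot                     ^next_top
 *
 * Object bodies are allocated upward from next_bot; the struct
 * smp_object index grows downward from next_top (sg->objs), and the
 * segment is closed before the two can meet.
 */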

/*--------------------------------------------------------------------
 * Close a segment
 */

void
smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
{
        uint64_t left, dst, len;
        void *dp;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg == sc->cur_seg);
        AN(sg->p.offset);
        sc->cur_seg = NULL;

        if (sg->nalloc == 0) {
                /* If segment is empty, delete instead */
                VTAILQ_REMOVE(&sc->segments, sg, list);
                assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
                assert(sg->p.offset < sc->mediasize);
                sc->free_offset = sg->p.offset;
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
                return;
        }

        /*
         * If there is enough space left that we can move the smp_objects
         * down without overwriting the present copy, we do so to
         * compact the segment.
         */
        left = smp_spaceleft(sc, sg);
        len = sizeof(struct smp_object) * sg->p.lobjlist;
        if (len < left) {
                dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
                dp = sc->base + dst;
                assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
                memcpy(dp, sg->objs, len);
                sc->next_top = dst;
                sg->objs = dp;
                sg->p.length = (sc->next_top - sg->p.offset)
                     + len + IRNUP(sc, SMP_SIGN_SPACE);
                (void)smp_spaceleft(sc, sg);    /* for the asserts */
        }

        /* Update the segment header */
        sg->p.objlist = sc->next_top;

        /* Write the (empty) OBJIDX signature */
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        assert(sc->next_top >= sc->next_bot);
        smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Write the (empty) SEGTAIL signature */
        smp_def_sign(sc, sg->ctx,
            sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Save segment list */
        smp_save_segs(sc);
        sc->free_offset = smp_segend(sg);
}
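
/*
 * Added commentary: after closing, the segment is laid out roughly as
 *
 *      SEGHEAD | object bodies | OBJIDX | struct smp_object[] | SEGTAIL
 *
 * with p.objlist pointing at the (possibly compacted) smp_object array
 * and p.length trimmed, so the tail of the segment returns to the
 * silo's free space.
 */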

/*---------------------------------------------------------------------
 * Translate an objcore's priv2 index to its struct smp_object.
 */

static struct smp_object *
smp_find_so(const struct smp_seg *sg, unsigned priv2)
{
        struct smp_object *so;

        priv2 &= ~NEED_FIXUP;
        assert(priv2 > 0);
        assert(priv2 <= sg->p.lobjlist);
        so = &sg->objs[sg->p.lobjlist - priv2];
        return (so);
}
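
/*
 * Added commentary: priv2 is a 1-based index counted from the end of
 * the array, so priv2 == 1 maps to objs[lobjlist - 1] and
 * priv2 == lobjlist maps to objs[0]; the array is filled top-down.
 */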

/*---------------------------------------------------------------------
 * Check if a given storage structure is valid to use
 */

static int
smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
    const struct storage *st)
{
        struct smp_seg *sg2;
        const uint8_t *pst;
        uint64_t o;

        (void)sg;               /* XXX: faster: Start search from here */
        pst = (const void *)st;

        if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
                return (0x01);          /* Before silo payload start */
        if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
                return (0x02);          /* After silo end */

        o = pst - sc->base;

        /* Find which segment contains the storage structure */
        VTAILQ_FOREACH(sg2, &sc->segments, list)
                if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
                        break;
        if (sg2 == NULL)
                return (0x04);          /* No claiming segment */
        if (!(sg2->flags & SMP_SEG_LOADED))
                return (0x08);          /* Claiming segment not loaded */

        /* It is now safe to access the storage structure */
        if (st->magic != STORAGE_MAGIC)
                return (0x10);          /* Not enough magic */

        if (o + st->space >= sg2->p.objlist)
                return (0x20);          /* Allocation not inside segment */

        if (st->len > st->space)
                return (0x40);          /* Plain bad... */

        /*
         * XXX: We could patch up st->stevedore and st->priv here
         * XXX: but if things go right, we will never need them.
         */
        return (0);
}

/*---------------------------------------------------------------------
 * objcore methods for persistent objects
 */

static void
fix_ptr(const struct smp_seg *sg, const struct storage *st, void **ptr)
{
        // See comment where used below
        uintptr_t u;

        u = (uintptr_t)(*ptr);
        if (u != 0) {
                u -= (uintptr_t)st->priv;
                u += (uintptr_t)sg->sc->base;
        }
        *ptr = (void *)u;
}
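
/*
 * Added commentary, a sketch of the rebasing above: st->priv holds the
 * base address the silo was mapped at when the pointer was stored, so
 *
 *      new = old - (uintptr_t)st->priv + (uintptr_t)sg->sc->base;
 *
 * NULL pointers (u == 0) are deliberately left untouched.
 */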

struct object * v_matchproto_(sml_getobj_f)
smp_sml_getobj(struct worker *wrk, struct objcore *oc)
{
        struct object *o;
        struct smp_seg *sg;
        struct smp_object *so;
        struct storage *st, *st2;
        uint64_t l;
        int bad;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        AN(oc->stobj->stevedore);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        /**************************************************************
         * The silo may have been remapped at a different address,
         * because the people who came up with ASLR were unable to
         * imagine that there might be beneficial use-cases for
         * always mapping a file at the same specific address.
         *
         * We store the silo's base address in struct storage->priv
         * and manually fix all the pointers in struct object and
         * the list of struct storage objects which hold the body.
         * When done, we update the storage->priv, so we can do the
         * same trick next time.
         *
         * This is a prohibitively expensive workaround, but we can
         * live with it, because the role of this stevedore is only
         * to keep the internal stevedore API honest.
         */

        st = (void*)(sg->sc->base + so->ptr);
        fix_ptr(sg, st, (void**)&st->ptr);
        o = (void*)st->ptr;
        fix_ptr(sg, st, (void**)&o->objstore);
        fix_ptr(sg, st, (void**)&o->va_vary);
        fix_ptr(sg, st, (void**)&o->va_headers);
        fix_ptr(sg, st, (void**)&o->list.vtqh_first);
        fix_ptr(sg, st, (void**)&o->list.vtqh_last);
        st->priv = (void*)(sg->sc->base);

        st2 = o->list.vtqh_first;
        while (st2 != NULL) {
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_next);
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_prev);
                fix_ptr(sg, st2, (void**)&st2->ptr);
                st2->priv = (void*)(sg->sc->base);
                st2 = st2->list.vtqe_next;
        }

        /*
         * The object may not be in this segment, since we allocate it
         * in a separate operation from the smp_object.  We could check
         * that it is in a later segment, but that would be complicated.
         * XXX: For now, be happy if it is inside the silo
         */
        ASSERT_PTR_IN_SILO(sg->sc, o);
        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

        /*
         * If this flag is not set, it will not be, and the lock is not
         * needed to test it.
         */
        if (!(oc->stobj->priv2 & NEED_FIXUP))
                return (o);

        Lck_Lock(&sg->sc->mtx);
        /* Check again, we might have raced. */
        if (oc->stobj->priv2 & NEED_FIXUP) {
                /* We trust caller to have a refcnt for us */

                bad = 0;
                l = 0;
                VTAILQ_FOREACH(st, &o->list, list) {
                        bad |= smp_loaded_st(sg->sc, sg, st);
                        if (bad)
                                break;
                        l += st->len;
                }
                if (l != vbe64dec(o->fa_len))
                        bad |= 0x100;

                if (bad) {
                        EXP_ZERO(oc);
                        EXP_ZERO(so);
                }

                sg->nfixed++;
                wrk->stats->n_object++;
                wrk->stats->n_vampireobject--;
                oc->stobj->priv2 &= ~NEED_FIXUP;
        }
        Lck_Unlock(&sg->sc->mtx);
        return (o);
}
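
/*
 * Added commentary: the NEED_FIXUP handling above is a double-checked
 * lock.  The flag is only ever cleared, never set again, so a lockless
 * read that finds it clear is safe; the mutex is taken, and the flag
 * re-checked, only on the once-per-object fixup path.
 */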

void v_matchproto_(objfree_f)
smp_oc_objfree(struct worker *wrk, struct objcore *oc)
{
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        Lck_Lock(&sg->sc->mtx);
        EXP_ZERO(so);
        so->ptr = 0;

        assert(sg->nobj > 0);
        sg->nobj--;
        if (oc->stobj->priv2 & NEED_FIXUP) {
                wrk->stats->n_vampireobject--;
        } else {
                assert(sg->nfixed > 0);
                sg->nfixed--;
                wrk->stats->n_object--;
        }
        VTAILQ_REMOVE(&sg->objcores, oc, lru_list);

        Lck_Unlock(&sg->sc->mtx);
        memset(oc->stobj, 0, sizeof oc->stobj);
}
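
/*
 * Added commentary: freeing only zeroes the object's smp_object slot;
 * the disk space is reclaimed once the segment becomes empty and
 * smp_save_segs() trims it from the front of the segment list.
 */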

/*--------------------------------------------------------------------*/

void
smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
{

        AZ(objidx & NEED_FIXUP);
        oc->stobj->priv = sg;
        oc->stobj->priv2 = objidx;
}

/*--------------------------------------------------------------------*/

void v_matchproto_(obj_event_f)
smp_oc_event(struct worker *wrk, void *priv, struct objcore *oc, unsigned ev)
{
        struct stevedore *st;
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(st, priv, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->stobj->stevedore != st)
                return;

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        if (sg == sg->sc->cur_seg) {
                /* Lock necessary, we might race close_seg */
                Lck_Lock(&sg->sc->mtx);
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
                Lck_Unlock(&sg->sc->mtx);
        } else {
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
        }
}