| | varnish-cache/bin/varnishd/storage/storage_persistent.c |
| 0 |
|
/*- |
| 1 |
|
* Copyright (c) 2008-2011 Varnish Software AS |
| 2 |
|
* All rights reserved. |
| 3 |
|
* |
| 4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
| 5 |
|
* |
| 6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
| 7 |
|
* |
| 8 |
|
* Redistribution and use in source and binary forms, with or without |
| 9 |
|
* modification, are permitted provided that the following conditions |
| 10 |
|
* are met: |
| 11 |
|
* 1. Redistributions of source code must retain the above copyright |
| 12 |
|
* notice, this list of conditions and the following disclaimer. |
| 13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
| 14 |
|
* notice, this list of conditions and the following disclaimer in the |
| 15 |
|
* documentation and/or other materials provided with the distribution. |
| 16 |
|
* |
| 17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
| 21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 |
|
* SUCH DAMAGE. |
| 28 |
|
* |
| 29 |
|
* Persistent storage method |
| 30 |
|
* |
| 31 |
|
* XXX: Before we start the client or maybe after it stops, we should give the |
| 32 |
|
* XXX: stevedores a chance to examine their storage for consistency. |
| 33 |
|
* |
| 34 |
|
* XXX: Do we ever free the LRU-lists ? |
| 35 |
|
*/ |
| 36 |
|
|
| 37 |
|
#include "config.h" |
| 38 |
|
|
| 39 |
|
#include "cache/cache_varnishd.h" |
| 40 |
|
|
| 41 |
|
#include <sys/mman.h> |
| 42 |
|
|
| 43 |
|
#include <stdio.h> |
| 44 |
|
#include <stdlib.h> |
| 45 |
|
|
| 46 |
|
#include "cache/cache_obj.h" |
| 47 |
|
#include "cache/cache_objhead.h" |
| 48 |
|
#include "storage/storage.h" |
| 49 |
|
#include "storage/storage_simple.h" |
| 50 |
|
|
| 51 |
|
#include "vcli_serve.h" |
| 52 |
|
#include "vsha256.h" |
| 53 |
|
#include "vtim.h" |
| 54 |
|
|
| 55 |
|
#include "storage/storage_persistent.h" |
| 56 |
|
|
| 57 |
|
/*
 * Method table for persistent objects; filled in by smp_init() from the
 * "simple" (SML) methods plus persistent-specific overrides.
 */
static struct obj_methods smp_oc_realmethods;

/* Lock class for the per-silo mutex (sc->mtx). */
static struct VSC_lck *lck_smp;

static void smp_init(void);

#ifndef WITH_PERSISTENT_STORAGE
#error "WITH_PERSISTENT_STORAGE must be defined"
#endif

/*--------------------------------------------------------------------*/

/*
 * silos is unlocked, it only changes during startup when we are
 * single-threaded
 */
static VTAILQ_HEAD(,smp_sc) silos = VTAILQ_HEAD_INITIALIZER(silos);
| 74 |
|
|
| 75 |
|
/*-------------------------------------------------------------------- |
| 76 |
|
* Add bans to silos |
| 77 |
|
*/ |
| 78 |
|
|
| 79 |
|
/*
 * Append one serialized ban to a signspace, if it fits.
 * Returns 0 on success, -1 when the signspace has insufficient room.
 */
static int
smp_appendban(const struct smp_sc *sc, struct smp_signspace *spc,
    uint32_t len, const uint8_t *ban)
{
	(void)sc;

	/* Refuse rather than overflow the signspace */
	if (SIGNSPACE_FREE(spc) < len)
		return (-1);
	memcpy(SIGNSPACE_FRONT(spc), ban, len);
	smp_append_signspace(spc, len);
	return (0);
}
| 93 |
|
|
| 94 |
|
/* Trust that cache_ban.c takes care of locking */ |
| 95 |
|
|
| 96 |
|
static int |
| 97 |
2760 |
smp_baninfo(const struct stevedore *stv, enum baninfo event, |
| 98 |
|
const uint8_t *ban, unsigned len) |
| 99 |
|
{ |
| 100 |
|
struct smp_sc *sc; |
| 101 |
2760 |
int r = 0; |
| 102 |
|
|
| 103 |
2760 |
CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC); |
| 104 |
|
|
| 105 |
2760 |
switch (event) { |
| 106 |
|
case BI_NEW: |
| 107 |
1920 |
r |= smp_appendban(sc, &sc->ban1, len, ban); |
| 108 |
1920 |
r |= smp_appendban(sc, &sc->ban2, len, ban); |
| 109 |
1920 |
break; |
| 110 |
|
default: |
| 111 |
|
/* Ignored */ |
| 112 |
840 |
break; |
| 113 |
|
} |
| 114 |
|
|
| 115 |
2760 |
return (r); |
| 116 |
|
} |
| 117 |
|
|
| 118 |
|
/*
 * Replace the contents of one ban signspace with a full export of the
 * ban list, then sync its signature to stable storage.
 */
static void
smp_banexport_spc(struct smp_signspace *spc, const uint8_t *bans, unsigned len)
{
	smp_reset_signspace(spc);
	/* Caller guarantees the export fits; this is a hard error if not */
	assert(SIGNSPACE_FREE(spc) >= len);
	memcpy(SIGNSPACE_DATA(spc), bans, len);
	smp_append_signspace(spc, len);
	smp_sync_sign(&spc->ctx);
}
| 127 |
|
|
| 128 |
|
/*
 * Export the complete ban list into both redundant ban signspaces
 * of the silo (ban1 and ban2 are kept as mirror copies).
 */
static void
smp_banexport(const struct stevedore *stv, const uint8_t *bans, unsigned len)
{
	struct smp_sc *sc;

	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);
	smp_banexport_spc(&sc->ban1, bans, len);
	smp_banexport_spc(&sc->ban2, bans, len);
}
| 137 |
|
|
| 138 |
|
/*-------------------------------------------------------------------- |
| 139 |
|
* Attempt to open and read in a ban list |
| 140 |
|
*/ |
| 141 |
|
|
| 142 |
|
/*
 * Verify a ban signspace and feed its contents to the ban subsystem.
 * Returns non-zero (the smp_chk_signspace() error) if the signature
 * check fails, 0 on success.
 */
static int
smp_open_bans(const struct smp_sc *sc, struct smp_signspace *spc)
{
	uint8_t *ptr, *pe;
	int i;

	ASSERT_CLI();
	(void)sc;
	i = smp_chk_signspace(spc);
	if (i)
		return (i);

	/* Reload everything between start of data and current front */
	ptr = SIGNSPACE_DATA(spc);
	pe = SIGNSPACE_FRONT(spc);
	BAN_Reload(ptr, pe - ptr);

	return (0);
}
| 160 |
|
|
| 161 |
|
/*-------------------------------------------------------------------- |
| 162 |
|
* Attempt to open and read in a segment list |
| 163 |
|
*/ |
| 164 |
|
|
| 165 |
|
/*
 * Read the segment list out of a signspace, locate (or create, by
 * dropping leading segments) a contiguous free reserve, and build the
 * in-core smp_seg list for the silo.
 *
 * Returns non-zero if the signspace fails its signature check (so the
 * caller can try the alternate copy), 0 otherwise.
 */
static int
smp_open_segs(struct smp_sc *sc, struct smp_signspace *spc)
{
	uint64_t length, l;
	struct smp_segptr *ss, *se;
	struct smp_seg *sg, *sg1, *sg2;
	int i, n = 0;		/* n counts segments dropped below */

	ASSERT_CLI();
	i = smp_chk_signspace(spc);
	if (i)
		return (i);

	ss = SIGNSPACE_DATA(spc);
	length = SIGNSPACE_LEN(spc);

	if (length == 0) {
		/* No segments: free space starts right after the fixed
		 * silo structures. */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		return (0);
	}
	/* ss = oldest segptr, se = newest segptr (last entry) */
	se = ss + length / sizeof *ss;
	se--;
	assert(ss <= se);

	/*
	 * Locate the free reserve, there are only two basic cases,
	 * but once we start dropping segments, things gets more complicated.
	 */

	sc->free_offset = se->offset + se->length;
	l = sc->mediasize - sc->free_offset;
	if (se->offset > ss->offset && l >= sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * Plenty of space at tail, do nothing.
		 */
	} else if (ss->offset > se->offset) {
		/*
		 * [zzzz____xxxxyyyy_]
		 * (make) space between ends
		 * We might nuke the entire tail end without getting
		 * enough space, in which case we fall through to the
		 * last check.
		 */
		while (ss < se && ss->offset > se->offset) {
			l = ss->offset - (se->offset + se->length);
			if (l > sc->free_reserve)
				break;
			/* Drop the oldest segment and keep looking */
			ss++;
			n++;
		}
	}

	if (l < sc->free_reserve) {
		/*
		 * [__xxxxyyyyzzzz___]
		 * (make) space at front
		 */
		sc->free_offset = sc->ident->stuff[SMP_SPC_STUFF];
		while (ss < se) {
			l = ss->offset - sc->free_offset;
			if (l > sc->free_reserve)
				break;
			ss++;
			n++;
		}
	}

	assert(l >= sc->free_reserve);


	/*
	 * Instantiate the surviving segments.  sg1 tracks the first
	 * created segment, sg2 the previous one; both are used only for
	 * the non-overlap sanity asserts below.
	 */
	sg1 = NULL;
	sg2 = NULL;
	for (; ss <= se; ss++) {
		ALLOC_OBJ(sg, SMP_SEG_MAGIC);
		AN(sg);
		VTAILQ_INIT(&sg->objcores);
		sg->p = *ss;

		sg->flags |= SMP_SEG_MUSTLOAD;

		/*
		 * HACK: prevent save_segs from nuking segment until we have
		 * HACK: loaded it.
		 */
		sg->nobj = 1;
		if (sg1 != NULL) {
			assert(sg1->p.offset != sg->p.offset);
			if (sg1->p.offset < sg->p.offset)
				assert(smp_segend(sg1) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg1->p.offset);
		}
		if (sg2 != NULL) {
			assert(sg2->p.offset != sg->p.offset);
			if (sg2->p.offset < sg->p.offset)
				assert(smp_segend(sg2) <= sg->p.offset);
			else
				assert(smp_segend(sg) <= sg2->p.offset);
		}

		/* XXX: check that they are inside silo */
		/* XXX: check that they don't overlap */
		/* XXX: check that they are serial */
		sg->sc = sc;
		VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
		sg2 = sg;
		if (sg1 == NULL)
			sg1 = sg;
	}
	printf("Dropped %d segments to make free_reserve\n", n);
	return (0);
}
| 279 |
|
|
| 280 |
|
/*-------------------------------------------------------------------- |
| 281 |
|
* Silo worker thread |
| 282 |
|
*/ |
| 283 |
|
|
| 284 |
|
/*
 * Per-silo background thread: first loads all objects from segments
 * marked SMP_SEG_MUSTLOAD, then loops saving the segment list whenever
 * the oldest segment has become empty, until SMP_SC_STOP is set.
 */
static void * v_matchproto_(bgthread_t)
smp_thread(struct worker *wrk, void *priv)
{
	struct smp_sc *sc;
	struct smp_seg *sg;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(sc, priv, SMP_SC_MAGIC);
	sc->thread = pthread_self();

	/* First, load all the objects from all segments */
	VTAILQ_FOREACH(sg, &sc->segments, list)
		if (sg->flags & SMP_SEG_MUSTLOAD)
			smp_load_seg(wrk, sc, sg);

	sc->flags |= SMP_SC_LOADED;
	/* Release the ban-list reference taken in smp_open() */
	BAN_Release();
	printf("Silo completely loaded\n");

	/* Housekeeping loop */
	Lck_Lock(&sc->mtx);
	while (!(sc->flags & SMP_SC_STOP)) {
		sg = VTAILQ_FIRST(&sc->segments);
		/* Oldest segment empty (and not being written) -> persist
		 * the pruned segment list */
		if (sg != NULL && sg != sc->cur_seg && sg->nobj == 0)
			smp_save_segs(sc);

		/* Sleep ~1.14s between passes (deliberately non-round,
		 * presumably to avoid phase-locking with other periodic
		 * work — TODO confirm) */
		Lck_Unlock(&sc->mtx);
		VTIM_sleep(3.14159265359 - 2);
		Lck_Lock(&sc->mtx);
	}

	/* Final save on shutdown */
	smp_save_segs(sc);

	Lck_Unlock(&sc->mtx);
	pthread_exit(0);

	NEEDLESS(return (NULL));
}
| 322 |
|
|
| 323 |
|
/*-------------------------------------------------------------------- |
| 324 |
|
* Open a silo in the worker process |
| 325 |
|
*/ |
| 326 |
|
|
| 327 |
|
/*
 * Open a silo in the worker process: validate it, recover the ban and
 * segment lists (each kept in two redundant copies), open a fresh
 * segment for writing and start the background loader thread.
 */
static void v_matchproto_(storage_open_f)
smp_open(struct stevedore *st)
{
	struct smp_sc *sc;

	ASSERT_CLI();

	/* One-time global init on the first silo */
	if (VTAILQ_EMPTY(&silos))
		smp_init();

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);

	Lck_New(&sc->mtx, lck_smp);
	Lck_Lock(&sc->mtx);

	sc->stevedore = st;

	/* We trust the parent to give us a valid silo, for good measure: */
	AZ(smp_valid_silo(sc));

	/* Write-protect the first page (the silo identification block) */
	AZ(mprotect((void*)sc->base, 4096, PROT_READ));

	sc->ident = SIGN_DATA(&sc->idn);

	/* Check ban lists */
	if (smp_chk_signspace(&sc->ban1)) {
		/* Ban list 1 is broken, use ban2 */
		AZ(smp_chk_signspace(&sc->ban2));
		smp_copy_signspace(&sc->ban1, &sc->ban2);
		smp_sync_sign(&sc->ban1.ctx);
	} else {
		/* Ban1 is OK, copy to ban2 for consistency */
		smp_copy_signspace(&sc->ban2, &sc->ban1);
		smp_sync_sign(&sc->ban2.ctx);
	}
	AZ(smp_open_bans(sc, &sc->ban1));

	/* We attempt seg1 first, and if that fails, try seg2 */
	if (smp_open_segs(sc, &sc->seg1))
		AZ(smp_open_segs(sc, &sc->seg2));

	/*
	 * Grab a reference to the tail of the ban list, until the thread
	 * has loaded all objects, so we can be sure that all of our
	 * proto-bans survive until then.
	 */
	BAN_Hold();

	/* XXX: save segments to ensure consistency between seg1 & seg2 ? */

	/* XXX: abandon early segments to make sure we have free space ? */

	(void)ObjSubscribeEvents(smp_oc_event, st,
	    OEV_BANCHG|OEV_TTLCHG|OEV_INSERT);

	/* Open a new segment, so we are ready to write */
	smp_new_seg(sc);

	/* Start the silo worker thread, it will load the objects */
	WRK_BgThread(&sc->bgthread, "persistence", smp_thread, sc);

	VTAILQ_INSERT_TAIL(&silos, sc, list);
	Lck_Unlock(&sc->mtx);
}
| 391 |
|
|
| 392 |
|
/*-------------------------------------------------------------------- |
| 393 |
|
* Close a silo |
| 394 |
|
*/ |
| 395 |
|
|
| 396 |
|
/*
 * Close a silo.  Called twice by the framework: first with warn set
 * (close the current segment and tell the background thread to stop),
 * then with warn clear (reap the background thread).
 */
static void v_matchproto_(storage_close_f)
smp_close(const struct stevedore *st, int warn)
{
	struct smp_sc *sc;
	void *status;

	ASSERT_CLI();

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
	if (warn) {
		Lck_Lock(&sc->mtx);
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		AZ(sc->cur_seg);
		/* Make smp_thread()'s housekeeping loop exit */
		sc->flags |= SMP_SC_STOP;
		Lck_Unlock(&sc->mtx);
	} else {
		/* Second pass: wait for the thread's final save */
		PTOK(pthread_join(sc->bgthread, &status));
		AZ(status);
	}
}
| 417 |
|
|
| 418 |
|
/*-------------------------------------------------------------------- |
| 419 |
|
* Allocate a bite. |
| 420 |
|
* |
| 421 |
|
* Allocate [min_size...max_size] space from the bottom of the segment, |
| 422 |
|
* as is convenient. |
| 423 |
|
* |
| 424 |
|
* If 'so' + 'idx' is given, also allocate a smp_object from the top |
| 425 |
|
* of the segment. |
| 426 |
|
* |
| 427 |
|
* Return the segment in 'ssg' if given. |
| 428 |
|
*/ |
| 429 |
|
|
| 430 |
|
/*
 * Allocate [min_size...max_size] bytes (rounded to silo granularity)
 * from the bottom of the current segment; optionally also carve one
 * smp_object slot ('so' + 'idx') from the top of the segment.  Opens a
 * new segment if the current one is too full.  Returns NULL when no
 * segment can satisfy even min_size; the segment used is returned via
 * 'ssg' if given.
 */
static struct storage *
smp_allocx(const struct stevedore *st, size_t min_size, size_t max_size,
    struct smp_object **so, unsigned *idx, struct smp_seg **ssg)
{
	struct smp_sc *sc;
	struct storage *ss;
	struct smp_seg *sg;
	uint64_t left, extra;

	CAST_OBJ_NOTNULL(sc, st->priv, SMP_SC_MAGIC);
	assert(min_size <= max_size);

	max_size = IRNUP(sc, max_size);
	min_size = IRNUP(sc, min_size);

	/* Overhead: the struct storage header, plus an smp_object slot
	 * when one was requested */
	extra = IRNUP(sc, sizeof(*ss));
	if (so != NULL) {
		extra += sizeof(**so);
		AN(idx);
	}

	Lck_Lock(&sc->mtx);
	sg = NULL;
	ss = NULL;

	left = 0;
	if (sc->cur_seg != NULL)
		left = smp_spaceleft(sc, sc->cur_seg);
	if (left < extra + min_size) {
		/* Current segment too full: close it and try a new one */
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		smp_new_seg(sc);
		if (sc->cur_seg != NULL)
			left = smp_spaceleft(sc, sc->cur_seg);
		else
			left = 0;
	}

	if (left >= extra + min_size) {
		AN(sc->cur_seg);
		/* Shrink the request to what actually fits */
		if (left < extra + max_size)
			max_size = IRNDN(sc, left - extra);

		/* Storage grows from the bottom of the segment... */
		sg = sc->cur_seg;
		ss = (void*)(sc->base + sc->next_bot);
		sc->next_bot += max_size + IRNUP(sc, sizeof(*ss));
		sg->nalloc++;
		if (so != NULL) {
			/* ...smp_objects grow down from the top */
			sc->next_top -= sizeof(**so);
			*so = (void*)(sc->base + sc->next_top);
			/* Render this smp_object mostly harmless */
			EXP_ZERO((*so));
			(*so)->ban = 0.;
			(*so)->ptr = 0;
			sg->objs = *so;
			*idx = ++sg->p.lobjlist;
		}
		(void)smp_spaceleft(sc, sg);	/* for the assert */
	}
	Lck_Unlock(&sc->mtx);

	if (ss == NULL)
		return (ss);
	AN(sg);
	assert(max_size >= min_size);

	/* Fill the storage structure */
	INIT_OBJ(ss, STORAGE_MAGIC);
	ss->ptr = PRNUP(sc, ss + 1);
	ss->space = max_size;
	ss->priv = sc->base;
	if (ssg != NULL)
		*ssg = sg;
	return (ss);
}
| 505 |
|
|
| 506 |
|
/*-------------------------------------------------------------------- |
| 507 |
|
* Allocate an object |
| 508 |
|
*/ |
| 509 |
|
|
| 510 |
|
/*
 * Allocate an object in the silo: storage for the in-core object plus
 * an on-media smp_object slot, nuking LRU objects if needed to make
 * room.  Returns 1 on success, 0 on failure or for objects already
 * dead on arrival.
 */
static int v_matchproto_(storage_allocobj_f)
smp_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st;
	struct smp_sc *sc;
	struct smp_seg *sg;
	struct smp_object *so;
	unsigned objidx;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(sc, stv->priv, SMP_SC_MAGIC);

	/* Don't entertain already dead objects */
	if (oc->flags & OC_F_DYING)
		return (0);
	if (oc->t_origin <= 0.)
		return (0);
	if (oc->ttl + oc->grace + oc->keep <= 0.)
		return (0);

	/* Total need: object struct + workspace, silo-rounded */
	ltot = sizeof(struct object) + PRNDUP(wsl);
	ltot = IRNUP(sc, ltot);

	st = NULL;
	sg = NULL;
	so = NULL;
	objidx = 0;

	/* Keep nuking LRU objects until the allocation fits or nothing
	 * more can be nuked */
	do {
		st = smp_allocx(stv, ltot, ltot, &so, &objidx, &sg);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);		// NOP
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	AN(st);
	AN(sg);
	AN(so);
	assert(st->space >= ltot);

	o = SML_MkObject(stv, oc, st->ptr);
	AN(oc->stobj->stevedore);
	assert(oc->stobj->stevedore == stv);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	o->objstore = st;
	st->len = sizeof(*o);

	Lck_Lock(&sc->mtx);
	sg->nfixed++;
	sg->nobj++;

	/* We have to do this somewhere, might as well be here... */
	assert(sizeof so->hash == DIGEST_LEN);
	memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
	EXP_COPY(so, oc);
	/* On-media pointer is an offset relative to the silo base */
	so->ptr = (uint8_t*)(o->objstore) - sc->base;
	so->ban = BAN_Time(oc->ban);

	smp_init_oc(oc, sg, objidx);

	VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
	Lck_Unlock(&sc->mtx);
	return (1);
}
| 581 |
|
|
| 582 |
|
/*-------------------------------------------------------------------- |
| 583 |
|
* Allocate a bite |
| 584 |
|
*/ |
| 585 |
|
|
| 586 |
|
static struct storage * v_matchproto_(sml_alloc_f) |
| 587 |
240 |
smp_alloc(const struct stevedore *st, size_t size) |
| 588 |
|
{ |
| 589 |
|
|
| 590 |
480 |
return (smp_allocx(st, |
| 591 |
240 |
size > 4096 ? 4096 : size, size, NULL, NULL, NULL)); |
| 592 |
|
} |
| 593 |
|
|
| 594 |
|
/*--------------------------------------------------------------------*/ |
| 595 |
|
|
| 596 |
|
/*
 * The persistent stevedore method table.  The "deprecated_" prefix in
 * the name reflects the deprecation of this storage backend.
 */
const struct stevedore smp_stevedore = {
	.magic = STEVEDORE_MAGIC,
	.name = "deprecated_persistent",
	.init = smp_mgt_init,
	.open = smp_open,
	.close = smp_close,
	.allocobj = smp_allocobj,
	.baninfo = smp_baninfo,
	.banexport = smp_banexport,
	/* Object methods are assembled at runtime by smp_init() */
	.methods = &smp_oc_realmethods,

	.sml_alloc = smp_alloc,
	.sml_free = NULL,
	.sml_getobj = smp_sml_getobj,
};
| 611 |
|
|
| 612 |
|
/*-------------------------------------------------------------------- |
| 613 |
|
* Persistence is a bear to test unadulterated, so we cheat by adding |
| 614 |
|
* a cli command we can use to make it do tricks for us. |
| 615 |
|
*/ |
| 616 |
|
|
| 617 |
|
/*
 * Dump a silo's segment list (and the current segment's allocation
 * pointers) to a CLI connection.
 */
static void
debug_report_silo(struct cli *cli, const struct smp_sc *sc)
{
	struct smp_seg *sg;

	VCLI_Out(cli, "Silo: %s (%s)\n",
	    sc->stevedore->ident, sc->filename);
	VTAILQ_FOREACH(sg, &sc->segments, list) {
		VCLI_Out(cli, " Seg: [0x%jx ... +0x%jx]\n",
		    (uintmax_t)sg->p.offset, (uintmax_t)sg->p.length);
		if (sg == sc->cur_seg)
			VCLI_Out(cli,
			    " Alloc: [0x%jx ... 0x%jx] = 0x%jx free\n",
			    (uintmax_t)(sc->next_bot),
			    (uintmax_t)(sc->next_top),
			    (uintmax_t)(sc->next_top - sc->next_bot));
		VCLI_Out(cli, " %u nobj, %u alloc, %u lobjlist, %u fixed\n",
		    sg->nobj, sg->nalloc, sg->p.lobjlist, sg->nfixed);
	}
}
| 637 |
|
|
| 638 |
|
/*
 * CLI debug command:
 *   debug.persistent                  - report all silos
 *   debug.persistent <silo>           - report one silo
 *   debug.persistent <silo> sync      - close current segment, open new
 *   debug.persistent <silo> dump      - report one silo (under lock)
 */
static void v_matchproto_(cli_func_t)
debug_persistent(struct cli *cli, const char * const * av, void *priv)
{
	struct smp_sc *sc;

	(void)priv;

	if (av[2] == NULL) {
		/* No silo named: report them all */
		VTAILQ_FOREACH(sc, &silos, list)
			debug_report_silo(cli, sc);
		return;
	}
	/* Find the named silo */
	VTAILQ_FOREACH(sc, &silos, list)
		if (!strcmp(av[2], sc->stevedore->ident))
			break;
	if (sc == NULL) {
		VCLI_Out(cli, "Silo <%s> not found\n", av[2]);
		VCLI_SetResult(cli, CLIS_PARAM);
		return;
	}
	if (av[3] == NULL) {
		debug_report_silo(cli, sc);
		return;
	}
	Lck_Lock(&sc->mtx);
	if (!strcmp(av[3], "sync")) {
		if (sc->cur_seg != NULL)
			smp_close_seg(sc, sc->cur_seg);
		smp_new_seg(sc);
	} else if (!strcmp(av[3], "dump")) {
		debug_report_silo(cli, sc);
	} else {
		VCLI_Out(cli, "Unknown operation\n");
		VCLI_SetResult(cli, CLIS_PARAM);
	}
	Lck_Unlock(&sc->mtx);
}
| 675 |
|
|
| 676 |
|
/* CLI command table, registered by smp_init() */
static struct cli_proto debug_cmds[] = {
	{ CLICMD_DEBUG_PERSISTENT, "d", debug_persistent },
	{ NULL }
};
| 680 |
|
|
| 681 |
|
/*-------------------------------------------------------------------- |
| 682 |
|
*/ |
| 683 |
|
|
| 684 |
|
static void |
| 685 |
1160 |
smp_init(void) |
| 686 |
|
{ |
| 687 |
1160 |
lck_smp = Lck_CreateClass(NULL, "smp"); |
| 688 |
1160 |
CLI_AddFuncs(debug_cmds); |
| 689 |
1160 |
smp_oc_realmethods.objfree = SML_methods.objfree; |
| 690 |
1160 |
smp_oc_realmethods.objiterator = SML_methods.objiterator; |
| 691 |
1160 |
smp_oc_realmethods.objgetspace = SML_methods.objgetspace; |
| 692 |
1160 |
smp_oc_realmethods.objextend = SML_methods.objextend; |
| 693 |
1160 |
smp_oc_realmethods.objbocdone = SML_methods.objbocdone; |
| 694 |
1160 |
smp_oc_realmethods.objgetattr = SML_methods.objgetattr; |
| 695 |
1160 |
smp_oc_realmethods.objsetattr = SML_methods.objsetattr; |
| 696 |
1160 |
smp_oc_realmethods.objtouch = LRU_Touch; |
| 697 |
1160 |
smp_oc_realmethods.objfree = smp_oc_objfree; |
| 698 |
1160 |
smp_oc_realmethods.vai_init = SML_methods.vai_init; |
| 699 |
1160 |
} |
| 700 |
|
|
| 701 |
|
/*-------------------------------------------------------------------- |
| 702 |
|
* Pause until all silos have loaded. |
| 703 |
|
*/ |
| 704 |
|
|
| 705 |
|
void |
| 706 |
960 |
SMP_Ready(void) |
| 707 |
|
{ |
| 708 |
|
struct smp_sc *sc; |
| 709 |
|
|
| 710 |
960 |
ASSERT_CLI(); |
| 711 |
960 |
do { |
| 712 |
2284 |
VTAILQ_FOREACH(sc, &silos, list) |
| 713 |
1324 |
if (!(sc->flags & SMP_SC_LOADED)) |
| 714 |
3 |
break; |
| 715 |
963 |
if (sc != NULL) |
| 716 |
3 |
(void)sleep(1); |
| 717 |
963 |
} while (sc != NULL); |
| 718 |
960 |
} |