/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"

#include "cache/cache_obj.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"
#include "storage/storage_simple.h"

#include "vtim.h"

/* Flags for allocating memory in sml_stv_alloc */
#define LESS_MEM_ALLOCED_IS_OK 1
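
/*
 * Callers which can make do with less memory than requested pass
 * LESS_MEM_ALLOCED_IS_OK; sml_stv_alloc() below may then clamp the
 * request to fetch_maxchunksize and halve it on allocation failure.
 */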

// marker pointer for sml_trimstore
static void *trim_once = &trim_once;
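
/*
 * Because trim_once points at its own storage, its address can never
 * coincide with a real struct storage, so it doubles as a "trim has
 * happened, nothing left to free" marker in boc->stevedore_priv
 * (see sml_trimstore() and sml_bocfini() below).
 */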

/*-------------------------------------------------------------------*/

static struct storage *
objallocwithnuke(struct worker *, const struct stevedore *, ssize_t size,
    int flags);

static struct storage *
sml_stv_alloc(const struct stevedore *stv, ssize_t size, int flags)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->sml_alloc);

	if (!(flags & LESS_MEM_ALLOCED_IS_OK)) {
		if (size > cache_param->fetch_maxchunksize)
			return (NULL);
		else
			return (stv->sml_alloc(stv, size));
	}

	if (size > cache_param->fetch_maxchunksize)
		size = cache_param->fetch_maxchunksize;

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	for (;;) {
		/* try to allocate from it */
		assert(size > 0);
		st = stv->sml_alloc(stv, size);
		if (st != NULL)
			break;

		if (size <= cache_param->fetch_chunksize)
			break;

		size /= 2;
	}
	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}
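
/*
 * Illustration: with fetch_chunksize at 16k and fetch_maxchunksize at
 * 256k (hypothetical values), a 1M request with LESS_MEM_ALLOCED_IS_OK
 * is clamped to 256k and, if the stevedore is full, retried at 128k,
 * 64k, 32k and 16k before NULL is returned.
 */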

static void
sml_stv_free(const struct stevedore *stv, struct storage *st)
{

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	if (stv->sml_free != NULL)
		stv->sml_free(st);
}

/*--------------------------------------------------------------------
 * This function is called by a stevedore's ->allocobj() method, which
 * very often will be SML_allocobj() below, to convert a slab of
 * storage into an object which the stevedore can then register in its
 * internal state, before returning it to STV_NewObject().
 * As you probably guessed: All this for persistence.
 */

struct object *
SML_MkObject(const struct stevedore *stv, struct objcore *oc, void *ptr)
{
	struct object *o;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->methods);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(PAOK(ptr));

	o = ptr;
	INIT_OBJ(o, OBJECT_MAGIC);

	VTAILQ_INIT(&o->list);

	oc->stobj->stevedore = stv;
	oc->stobj->priv = o;
	oc->stobj->priv2 = 0;
	return (o);
}

/*--------------------------------------------------------------------
 * This is the default ->allocobj() which all stevedores who do not
 * implement persistent storage can rely on.
 */

int v_matchproto_(storage_allocobj_f)
SML_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st = NULL;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(stv->sml_alloc);

	ltot = sizeof(*o) + PRNDUP(wsl);

	do {
		st = stv->sml_alloc(stv, ltot);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	o = SML_MkObject(stv, oc, st->ptr);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st->len = sizeof(*o);
	o->objstore = st;
	return (1);
}

void * v_matchproto_(storage_allocbuf_t)
SML_AllocBuf(struct worker *wrk, const struct stevedore *stv, size_t size,
    uintptr_t *ppriv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(ppriv);

	if (size > UINT_MAX)
		return (NULL);
	st = objallocwithnuke(wrk, stv, size, 0);
	if (st == NULL)
		return (NULL);
	assert(st->space >= size);
	st->flags = STORAGE_F_BUFFER;
	st->len = size;
	*ppriv = (uintptr_t)st;
	return (st->ptr);
}

void v_matchproto_(storage_freebuf_t)
SML_FreeBuf(struct worker *wrk, const struct stevedore *stv, uintptr_t priv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	CAST_OBJ_NOTNULL(st, (void *)priv, STORAGE_MAGIC);
	assert(st->flags == STORAGE_F_BUFFER);
	sml_stv_free(stv, st);
}
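
/*
 * Usage sketch for SML_AllocBuf()/SML_FreeBuf() above (illustrative
 * only):
 *
 *	uintptr_t priv;
 *	void *buf = SML_AllocBuf(wrk, stv, sz, &priv);
 *
 *	if (buf != NULL) {
 *		// ... use sz bytes at buf ...
 *		SML_FreeBuf(wrk, stv, priv);
 *	}
 */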

/*--------------------------------------------------------------------
 */

static struct object *
sml_getobj(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	if (stv->sml_getobj != NULL)
		return (stv->sml_getobj(wrk, oc));
	if (oc->stobj->priv == NULL)
		return (NULL);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);
	return (o);
}

static void v_matchproto_(objslim_f)
sml_slim(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;
	struct storage *st, *stn;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

#define OBJ_AUXATTR(U, l)					\
	do {							\
		if (o->aa_##l != NULL) {			\
			sml_stv_free(stv, o->aa_##l);		\
			o->aa_##l = NULL;			\
		}						\
	} while (0);
#include "tbl/obj_attr.h"

	VTAILQ_FOREACH_SAFE(st, &o->list, list, stn) {
		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
		VTAILQ_REMOVE(&o->list, st, list);
		sml_stv_free(stv, st);
	}
}

static void
sml_bocfini(const struct stevedore *stv, struct boc *boc)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	if (boc->stevedore_priv == NULL ||
	    boc->stevedore_priv == trim_once)
		return;

	/* Free any leftovers from Trim */
	TAKE_OBJ_NOTNULL(st, &boc->stevedore_priv, STORAGE_MAGIC);
	sml_stv_free(stv, st);
}

/*
 * called in two cases:
 * - oc->boc == NULL: cache object on LRU freed
 * - oc->boc != NULL: cache object replaced for backend error
 */
static void v_matchproto_(objfree_f)
sml_objfree(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);

	sml_slim(wrk, oc);
	st = o->objstore;
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	FINI_OBJ(o);

	if (oc->boc != NULL)
		sml_bocfini(stv, oc->boc);
	else if (stv->lru != NULL)
		LRU_Remove(oc);

	sml_stv_free(stv, st);

	memset(oc->stobj, 0, sizeof oc->stobj);

	wrk->stats->n_object--;
}

// kept for reviewers - XXX remove later
#undef VAI_DBG

struct sml_hdl {
	struct vai_hdl_preamble preamble;
#define SML_HDL_MAGIC 0x37dfd996
	struct vai_qe qe;
	struct pool_task task;	// unfortunate
	struct ws *ws;	// NULL means malloc()ed
	struct objcore *oc;
	struct object *obj;
	const struct stevedore *stv;
	struct boc *boc;

	struct storage *st;	// updated by _lease()

	// only for _lease_boc()
	uint64_t st_off;	// already returned fragment of current st
	uint64_t avail, returned;
	struct storage *last;	// to resume, held back by _return()
};

static inline uint64_t
st2lease(const struct storage *st)
{
	uint64_t r = (uintptr_t)st;

	if (sizeof(void *) < 8)	//lint !e506 !e774
		r <<= 1;

	return (r);
}

static inline struct storage *
lease2st(uint64_t l)
{

	if (sizeof(void *) < 8)	//lint !e506 !e774
		l >>= 1;

	return ((void *)(uintptr_t)l);
}
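
/*
 * A lease is the struct storage address packed into an uint64_t, and
 * st2lease()/lease2st() are exact inverses of each other. The extra
 * shift on 32bit platforms presumably keeps pointer-derived leases
 * clear of reserved lease values such as VAI_LEASE_NORET.
 */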

static inline void
sml_ai_viov_fill(struct viov *viov, struct storage *st)
{
	viov->iov.iov_base = TRUST_ME(st->ptr);
	viov->iov.iov_len = st->len;
	viov->lease = st2lease(st);
	VAI_ASSERT_LEASE(viov->lease);
}

// sml has no mechanism to notify "I got free space again now"
// (we could add that, but because storage.h is used in mgt, a first attempt
// looks at least like this would cause some include spill for vai_q_head or
// something similar)
//
// So anyway, to get ahead we just implement a pretty stupid "call the notify
// some time later" on a thread
static void
sml_ai_later_task(struct worker *wrk, void *priv)
{
	struct sml_hdl *hdl;
	const vtim_dur dur = 0.0042;

	(void)wrk;
	VTIM_sleep(dur);
	CAST_VAI_HDL_NOTNULL(hdl, priv, SML_HDL_MAGIC);
	memset(&hdl->task, 0, sizeof hdl->task);
	hdl->qe.cb(hdl, hdl->qe.priv);
}

static void
sml_ai_later(struct worker *wrk, struct sml_hdl *hdl)
{
	AZ(hdl->task.func);
	AZ(hdl->task.priv);
	hdl->task.func = sml_ai_later_task;
	hdl->task.priv = hdl;
	AZ(Pool_Task(wrk->pool, &hdl->task, TASK_QUEUE_BG));
}

static int
sml_ai_buffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	const struct stevedore *stv;
	struct sml_hdl *hdl;
	struct storage *st;
	struct viov *vio;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	stv = hdl->stv;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	VSCARAB_FOREACH(vio, scarab)
		if (vio->iov.iov_len > UINT_MAX)
			return (-EINVAL);

	VSCARAB_FOREACH(vio, scarab) {
		st = objallocwithnuke(wrk, stv, vio->iov.iov_len, 0);
		if (st == NULL)
			break;
		assert(st->space >= vio->iov.iov_len);
		st->flags = STORAGE_F_BUFFER;
		st->len = st->space;

		sml_ai_viov_fill(vio, st);
		r++;
	}
	if (r == 0) {
		sml_ai_later(wrk, hdl);
		r = -EAGAIN;
	}
	return (r);
}

static int
sml_ai_lease_simple(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	struct storage *st;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);

	AZ(hdl->st_off);
	st = hdl->st;
	while (st != NULL && (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ(st, STORAGE_MAGIC);
		sml_ai_viov_fill(viov, st);
		r++;
		st = VTAILQ_PREV(st, storagehead, list);
	}
	hdl->st = st;
	if (st == NULL)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}

/*
 * on leases while streaming (with a boc):
 *
 * SML uses the lease return facility to implement the "free behind" for
 * OC_F_TRANSIENT objects. When streaming, we also return leases on
 * fragments of sts, but we must only "free behind" when we are done with the
 * last fragment.
 *
 * So we use a magic lease to signal "this is only a fragment", which we
 * ignore on returns.
 */
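
/*
 * Concretely: a viov covering only part of an st carries the magic
 * VAI_LEASE_NORET (nothing to do on return), while the viov reaching
 * the end of an st carries the real lease, so the st is only freed
 * behind once its last fragment has been consumed.
 */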

static int
sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	enum boc_state_e state = BOS_INVALID;
	struct storage *next;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);

	if (hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
		if (state == BOS_FAILED) {
			hdl->last = NULL;
			return (-EPIPE);
		}
		else if (state == BOS_FINISHED)
			(void)0;
		else if (hdl->avail == hdl->returned) {
			// ObjVAIGetExtend() has scheduled a notification
			if (hdl->boc->transit_buffer > 0)
				return (-ENOBUFS);
			else
				return (-EAGAIN);
		}
		else
			assert(state < BOS_FINISHED);
	}
	Lck_Lock(&hdl->boc->mtx);
	if (hdl->st == NULL && hdl->last != NULL) {
		/* when the "last" st completed, we did not yet have a next, so
		 * resume from there. Because "last" might have been returned and
		 * deleted, we can not just use the pointer, but rather need to
		 * iterate the st list.
		 * if we can not find "last", it also has been returned and
		 * deleted, and the current write head (VTAILQ_LAST) is our next
		 * st, which can also be null if we are done.
		 */
		VTAILQ_FOREACH_REVERSE(next, &hdl->obj->list, storagehead, list) {
			if (next == hdl->last) {
				hdl->st = VTAILQ_PREV(next, storagehead, list);
				break;
			}
		}
	}
	hdl->last = NULL;
	if (hdl->st == NULL) {
		assert(hdl->returned == 0 || hdl->avail == hdl->returned);
		hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
	}
	if (hdl->st == NULL)
		assert(hdl->avail == hdl->returned);

	while (hdl->avail > hdl->returned &&
	    (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ_NOTNULL(hdl->st, STORAGE_MAGIC); // ObjVAIGetExtend ensures
		assert(hdl->st_off <= hdl->st->space);
		size_t av = hdl->avail - hdl->returned;
		size_t l = hdl->st->space - hdl->st_off;
		AN(l);
		if (l > av)
			l = av;
		viov->iov.iov_base = TRUST_ME(hdl->st->ptr + hdl->st_off);
		viov->iov.iov_len = l;
		if (hdl->st_off + l == hdl->st->space) {
			next = VTAILQ_PREV(hdl->st, storagehead, list);
			AZ(hdl->last);
			if (next == NULL)
				hdl->last = hdl->st;
			else
				CHECK_OBJ(next, STORAGE_MAGIC);
#ifdef VAI_DBG
			VSLb(wrk->vsl, SLT_Debug,
			    "off %zu + l %zu == space st %p next st %p stvprv %p",
			    hdl->st_off, l, hdl->st, next,
			    hdl->boc->stevedore_priv);
#endif
			viov->lease = st2lease(hdl->st);
			hdl->st_off = 0;
			hdl->st = next;
		}
		else {
			viov->lease = VAI_LEASE_NORET;
			hdl->st_off += l;
		}
		hdl->returned += l;
		VAI_ASSERT_LEASE(viov->lease);
		r++;
	}

	Lck_Unlock(&hdl->boc->mtx);
	if (state != BOS_FINISHED && hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
	}
	if (state == BOS_FINISHED && hdl->avail == hdl->returned)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}

// return only buffers, used if object is not streaming
static void v_matchproto_(vai_return_f)
sml_ai_return_buffers(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);

	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) == 0)
			continue;
		sml_stv_free(hdl->stv, st);
	}
	VSCARET_INIT(scaret, scaret->capacity);
}

// generic return for buffers and object leases, used when streaming
static void v_matchproto_(vai_return_f)
sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARET_CHECK_NOTNULL(scaret);
	if (scaret->used == 0)
		return;

	// callback is only registered if needed
	assert(hdl->boc != NULL && (hdl->oc->flags & OC_F_TRANSIENT) != 0);

	// filter noret and last
	VSCARET_LOCAL(todo, scaret->used);
	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC);
		if (st == hdl->last)
			continue;
		VSCARET_ADD(todo, *p);
	}
	VSCARET_INIT(scaret, scaret->capacity);

	Lck_Lock(&hdl->boc->mtx);
	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) != 0)
			continue;
		VTAILQ_REMOVE(&hdl->obj->list, st, list);
		if (st == hdl->boc->stevedore_priv)
			hdl->boc->stevedore_priv = trim_once;
	}
	Lck_Unlock(&hdl->boc->mtx);

	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC);
		sml_stv_free(hdl->stv, st);
	}
}

static void v_matchproto_(vai_fini_f)
sml_ai_fini(struct worker *wrk, vai_hdl *vai_hdlp)
{
	struct sml_hdl *hdl;

	AN(vai_hdlp);
	CAST_VAI_HDL_NOTNULL(hdl, *vai_hdlp, SML_HDL_MAGIC);
	*vai_hdlp = NULL;

	if (hdl->boc != NULL) {
		ObjVAICancel(wrk, hdl->boc, &hdl->qe);
		HSH_DerefBoc(wrk, hdl->oc);
		hdl->boc = NULL;
	}

	if (hdl->ws != NULL)
		WS_Release(hdl->ws, 0);
	else
		free(hdl);
}

static vai_hdl v_matchproto_(vai_init_f)
sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify, void *notify_priv)
{
	struct sml_hdl *hdl;
	const size_t sz = sizeof *hdl;

	if (ws != NULL && WS_ReserveSize(ws, (unsigned)sz))
		hdl = WS_Reservation(ws);
	else {
		hdl = malloc(sz);
		ws = NULL;
	}

	AN(hdl);
	INIT_VAI_HDL(hdl, SML_HDL_MAGIC);
	hdl->preamble.vai_lease = sml_ai_lease_simple;
	hdl->preamble.vai_buffer = sml_ai_buffer;
	hdl->preamble.vai_return = sml_ai_return_buffers;
	hdl->preamble.vai_fini = sml_ai_fini;
	hdl->ws = ws;

	hdl->oc = oc;
	hdl->obj = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(hdl->obj, OBJECT_MAGIC);
	hdl->stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(hdl->stv, STEVEDORE_MAGIC);

	hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
	CHECK_OBJ_ORNULL(hdl->st, STORAGE_MAGIC);

	hdl->qe.magic = VAI_Q_MAGIC;
	hdl->qe.cb = notify;
	hdl->qe.hdl = hdl;
	hdl->qe.priv = notify_priv;

	hdl->boc = HSH_RefBoc(oc);
	if (hdl->boc == NULL)
		return (hdl);
	/* we only initialize notifications if we have a boc, so
	 * any wrong attempt triggers magic checks.
	 */
	hdl->preamble.vai_lease = sml_ai_lease_boc;
	if ((hdl->oc->flags & OC_F_TRANSIENT) != 0)
		hdl->preamble.vai_return = sml_ai_return;
	return (hdl);
}
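
/*
 * To recap the dispatch set up above: without a boc, the handle serves
 * the settled storage list through sml_ai_lease_simple() and only ever
 * returns buffers; with a boc, it streams through sml_ai_lease_boc(),
 * and for OC_F_TRANSIENT objects returned leases additionally free
 * body storage behind the reader via sml_ai_return().
 */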

/*
 * trivial notification to allow the iterator to simply block
 */
struct sml_notify {
	unsigned magic;
#define SML_NOTIFY_MAGIC 0x4589af31
	unsigned hasmore;
	pthread_mutex_t mtx;
	pthread_cond_t cond;
};

static void
sml_notify_init(struct sml_notify *sn)
{

	INIT_OBJ(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_init(&sn->mtx, NULL));
	AZ(pthread_cond_init(&sn->cond, NULL));
}

static void
sml_notify_fini(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_destroy(&sn->mtx));
	AZ(pthread_cond_destroy(&sn->cond));
}

static void v_matchproto_(vai_notify_cb)
sml_notify(vai_hdl hdl, void *priv)
{
	struct sml_notify *sn;

	(void) hdl;
	CAST_OBJ_NOTNULL(sn, priv, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	sn->hasmore = 1;
	AZ(pthread_cond_signal(&sn->cond));
	AZ(pthread_mutex_unlock(&sn->mtx));
}

static void
sml_notify_wait(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	while (sn->hasmore == 0)
		AZ(pthread_cond_wait(&sn->cond, &sn->mtx));
	AN(sn->hasmore);
	sn->hasmore = 0;
	AZ(pthread_mutex_unlock(&sn->mtx));
}
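
/*
 * sml_iterator() below drives the vai interface synchronously: it
 * pulls leases into a scarab, hands each viov to the caller's
 * objiterate_f and returns consumed leases through a scaret; whenever
 * ObjVAIlease() answers -ENOBUFS or -EAGAIN, it parks in
 * sml_notify_wait() until sml_notify() fires from the writing side.
 */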

static int v_matchproto_(objiterator_f)
sml_iterator(struct worker *wrk, struct objcore *oc,
    void *priv, objiterate_f *func, int final)
{
	struct sml_notify sn;
	struct viov *vio, *last;
	unsigned u, uu;
	vai_hdl hdl;
	int nn, r, r2, islast;

	VSCARAB_LOCAL(scarab, 16);
	VSCARET_LOCAL(scaret, 16);

	(void) final; // phase out?
	sml_notify_init(&sn);
	hdl = ObjVAIinit(wrk, oc, NULL, sml_notify, &sn);
	AN(hdl);

	r = u = 0;

	do {
		do {
			nn = ObjVAIlease(wrk, hdl, scarab);
			if (nn <= 0 || scarab->flags & VSCARAB_F_END)
				break;
		} while (scarab->used < scarab->capacity);

		/*
		 * nn is the wait/return action or 0;
		 * nn tells us whether to flush
		 */
		uu = u;
		last = VSCARAB_LAST(scarab);
		VSCARAB_FOREACH(vio, scarab) {
			islast = vio == last;
			AZ(u & OBJ_ITER_END);
			if (islast && scarab->flags & VSCARAB_F_END)
				u |= OBJ_ITER_END;

			// flush if it is the scarab's last IOV and we will
			// block next, or if we need space in the return
			// leases array
			uu = u;
			if ((islast && nn < 0) ||
			    scaret->used == scaret->capacity - 1)
				uu |= OBJ_ITER_FLUSH;
			r = func(priv, uu, vio->iov.iov_base,
			    vio->iov.iov_len);
			if (r != 0)
				break;

			// sufficient space ensured by capacity check above
			VSCARET_ADD(scaret, vio->lease);

			// whenever we have flushed, return leases
			if ((uu & OBJ_ITER_FLUSH) && scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		// return leases which we did not use if error (break)
		VSCARAB_FOREACH_RESUME(vio, scarab) {
			if (scaret->used == scaret->capacity)
				ObjVAIreturn(wrk, hdl, scaret);
			VSCARET_ADD(scaret, vio->lease);
		}

		// we have now completed the scarab
		VSCARAB_INIT(scarab, scarab->capacity);

		// flush before blocking if we did not already
		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN) &&
		    (uu & OBJ_ITER_FLUSH) == 0) {
			r = func(priv, OBJ_ITER_FLUSH, NULL, 0);
			if (scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN)) {
			assert(scaret->used <= 1);
			sml_notify_wait(&sn);
		}
		else if (r == 0 && nn < 0)
			r = -1;
	} while (nn != 0 && r == 0);

	if ((u & OBJ_ITER_END) == 0) {
		r2 = func(priv, OBJ_ITER_END, NULL, 0);
		if (r == 0)
			r = r2;
	}

	if (scaret->used > 0)
		ObjVAIreturn(wrk, hdl, scaret);

	ObjVAIfini(wrk, &hdl);
	sml_notify_fini(&sn);

	return (r);
}

/*--------------------------------------------------------------------
 */

static struct storage *
objallocwithnuke(struct worker *wrk, const struct stevedore *stv, ssize_t size,
    int flags)
{
	struct storage *st = NULL;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (size > cache_param->fetch_maxchunksize) {
		if (!(flags & LESS_MEM_ALLOCED_IS_OK))
			return (NULL);
		size = cache_param->fetch_maxchunksize;
	}

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	do {
		/* try to allocate from it */
		st = sml_stv_alloc(stv, size, flags);
		if (st != NULL)
			break;

		/* no luck; try to free some space and keep trying */
		if (stv->lru == NULL)
			break;
	} while (LRU_NukeOne(wrk, stv->lru));

	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}

static int v_matchproto_(objgetspace_f)
sml_getspace(struct worker *wrk, struct objcore *oc, ssize_t *sz,
    uint8_t **ptr)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(sz);
	AN(ptr);
	if (*sz == 0)
		*sz = cache_param->fetch_chunksize;
	assert(*sz > 0);
	if (oc->boc->transit_buffer > 0)
		*sz = vmin_t(ssize_t, *sz, oc->boc->transit_buffer);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	st = VTAILQ_FIRST(&o->list);
	if (st != NULL && st->len < st->space) {
		*sz = st->space - st->len;
		*ptr = st->ptr + st->len;
		assert(*sz > 0);
		return (1);
	}

	st = objallocwithnuke(wrk, oc->stobj->stevedore, *sz,
	    LESS_MEM_ALLOCED_IS_OK);
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_INSERT_HEAD(&o->list, st, list);
	Lck_Unlock(&oc->boc->mtx);

	*sz = st->space - st->len;
	assert(*sz > 0);
	*ptr = st->ptr + st->len;
	return (1);
}

static void v_matchproto_(objextend_f)
sml_extend(struct worker *wrk, struct objcore *oc, ssize_t l)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	assert(l > 0);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	assert(st->len + l <= st->space);
	st->len += l;
}

static void v_matchproto_(objtrimstore_f)
sml_trimstore(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st, *st1;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (oc->boc->stevedore_priv != NULL)
		WRONG("sml_trimstore already called");
	oc->boc->stevedore_priv = trim_once;

	if (stv->sml_free == NULL)
		return;

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);

	if (st == NULL)
		return;

	if (st->len == 0) {
		Lck_Lock(&oc->boc->mtx);
		VTAILQ_REMOVE(&o->list, st, list);
		Lck_Unlock(&oc->boc->mtx);
		/* sml_bocdone frees this */
		oc->boc->stevedore_priv = st;
		return;
	}

	if (st->space - st->len < 512)
		return;

	st1 = sml_stv_alloc(stv, st->len, 0);
	if (st1 == NULL)
		return;
	assert(st1->space >= st->len);

	memcpy(st1->ptr, st->ptr, st->len);
	st1->len = st->len;
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_REMOVE(&o->list, st, list);
	VTAILQ_INSERT_HEAD(&o->list, st1, list);
	Lck_Unlock(&oc->boc->mtx);
	/* sml_bocdone frees this */
	oc->boc->stevedore_priv = st;
}
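
/*
 * Example: if the last chunk was allocated at 64k but the body ended
 * after 1k, sml_trimstore() above copies that 1k into a right-sized
 * allocation and parks the oversized st in boc->stevedore_priv for
 * sml_bocdone() to free; slack below 512 bytes is not worth a copy.
 */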

static void v_matchproto_(objbocdone_f)
sml_bocdone(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
	const struct stevedore *stv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	sml_bocfini(stv, boc);

	if (stv->lru != NULL) {
		if (isnan(wrk->lastused))
			wrk->lastused = VTIM_real();
		LRU_Add(oc, wrk->lastused);	// approx timestamp is OK
	}
}

static const void * v_matchproto_(objgetattr_f)
sml_getattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t *len)
{
	struct object *o;
	ssize_t dummy;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	if (len == NULL)
		len = &dummy;
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)						\
	case OA_##U:							\
		*len = sizeof o->fa_##l;				\
		return (o->fa_##l);
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)						\
	case OA_##U:							\
		if (o->va_##l == NULL)					\
			return (NULL);					\
		*len = o->va_##l##_len;					\
		return (o->va_##l);
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)						\
	case OA_##U:							\
		if (o->aa_##l == NULL)					\
			return (NULL);					\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);		\
		*len = o->aa_##l->len;					\
		return (o->aa_##l->ptr);
#include "tbl/obj_attr.h"

	default:
		break;
	}
	WRONG("Unsupported OBJ_ATTR");
}

static void * v_matchproto_(objsetattr_f)
sml_setattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t len, const void *ptr)
{
	struct object *o;
	void *retval = NULL;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = o->objstore;

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)						\
	case OA_##U:							\
		assert(len == sizeof o->fa_##l);			\
		retval = o->fa_##l;					\
		break;
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)						\
	case OA_##U:							\
		if (o->va_##l##_len > 0) {				\
			AN(o->va_##l);					\
			assert(len == o->va_##l##_len);			\
			retval = o->va_##l;				\
		} else if (len > 0) {					\
			assert(len <= UINT_MAX);			\
			assert(st->len + len <= st->space);		\
			o->va_##l = st->ptr + st->len;			\
			st->len += len;					\
			o->va_##l##_len = len;				\
			retval = o->va_##l;				\
		}							\
		break;
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)						\
	case OA_##U:							\
		if (o->aa_##l != NULL) {				\
			CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);	\
			assert(len == o->aa_##l->len);			\
			retval = o->aa_##l->ptr;			\
			break;						\
		}							\
		if (len == 0)						\
			break;						\
		o->aa_##l = objallocwithnuke(wrk, oc->stobj->stevedore,	\
		    len, 0);						\
		if (o->aa_##l == NULL)					\
			break;						\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);		\
		assert(len <= o->aa_##l->space);			\
		o->aa_##l->len = len;					\
		retval = o->aa_##l->ptr;				\
		break;
#include "tbl/obj_attr.h"

	default:
		WRONG("Unsupported OBJ_ATTR");
		break;
	}

	if (retval != NULL && ptr != NULL)
		memcpy(retval, ptr, len);
	return (retval);
}

const struct obj_methods SML_methods = {
	.objfree	= sml_objfree,
	.objiterator	= sml_iterator,
	.objgetspace	= sml_getspace,
	.objextend	= sml_extend,
	.objtrimstore	= sml_trimstore,
	.objbocdone	= sml_bocdone,
	.objslim	= sml_slim,
	.objgetattr	= sml_getattr,
	.objsetattr	= sml_setattr,
	.objtouch	= LRU_Touch,
	.vai_init	= sml_ai_init
};

static void
sml_panic_st(struct vsb *vsb, const char *hd, const struct storage *st)
{
	VSB_printf(vsb, "%s = %p {priv=%p, ptr=%p, len=%u, space=%u},\n",
	    hd, st, st->priv, st->ptr, st->len, st->space);
}

void
SML_panic(struct vsb *vsb, const struct objcore *oc)
{
	struct object *o;
	struct storage *st;

	VSB_printf(vsb, "Simple = %p,\n", oc->stobj->priv);
	if (oc->stobj->priv == NULL)
		return;
	o = oc->stobj->priv;
	PAN_CheckMagic(vsb, o, OBJECT_MAGIC);
	sml_panic_st(vsb, "Obj", o->objstore);

#define OBJ_FIXATTR(U, l, sz)					\
	VSB_printf(vsb, "%s = ", #U);				\
	VSB_quote(vsb, (const void*)o->fa_##l, sz, VSB_QUOTE_HEX); \
	VSB_printf(vsb, ",\n");

#define OBJ_VARATTR(U, l)					\
	VSB_printf(vsb, "%s = {len=%u, ptr=%p},\n",		\
	    #U, o->va_##l##_len, o->va_##l);

#define OBJ_AUXATTR(U, l)					\
	do {							\
		if (o->aa_##l != NULL) sml_panic_st(vsb, #U, o->aa_##l);\
	} while(0);

#include "tbl/obj_attr.h"

	VTAILQ_FOREACH(st, &o->list, list) {
		sml_panic_st(vsb, "Body", st);
	}
}