| | varnish-cache/bin/varnishd/cache/cache_obj.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2013-2016 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* Lifetime of an objcore: |
30 |
|
* phase 0 - nonexistent |
31 |
|
* phase 1 - created, but no stevedore associated |
32 |
|
* phase 2 - stevedore associated, being filled out |
33 |
|
* phase 3 - stable, no changes happening |
34 |
|
* phase 4 - unavailable, being dismantled |
35 |
|
* phase 5 - stevedore disassociated |
36 |
|
* phase 6 - nonexistent |
37 |
|
* |
38 |
|
* 0->1 ObjNew() creates objcore |
39 |
|
* |
40 |
|
* 1->2 STV_NewObject() associates a stevedore |
41 |
|
* |
42 |
|
* 2 ObjSetState() sets state |
43 |
|
* 2 ObjWaitState() waits for particular state |
44 |
|
* INVALID->REQ_DONE->STREAM->FINISHED->FAILED |
45 |
|
* |
46 |
|
* 2 ObjGetSpace() allocates space |
47 |
|
* 2 ObjExtend() commits content |
48 |
|
* 2 ObjWaitExtend() waits for content - used to implement ObjIterate()) |
49 |
|
* |
50 |
|
* 2 ObjSetAttr() |
51 |
|
* 2 ObjCopyAttr() |
52 |
|
* 2 ObjSetFlag() |
53 |
|
* 2 ObjSetDouble() |
54 |
|
* 2 ObjSetU32() |
55 |
|
* 2 ObjSetU64() |
56 |
|
* |
57 |
|
* 2->3 ObjBocDone() Boc removed from OC, clean it up |
58 |
|
* |
59 |
|
* 23 ObjHasAttr() |
60 |
|
* 23 ObjGetAttr() |
61 |
|
* 23 ObjCheckFlag() |
62 |
|
* 23 ObjGetDouble() |
63 |
|
* 23 ObjGetU32() |
64 |
|
* 23 ObjGetU64() |
65 |
|
* 23 ObjGetLen() |
66 |
|
* 23 ObjGetXID() |
67 |
|
* |
68 |
|
* 23 ObjIterate() ... over body |
69 |
|
* |
70 |
|
* 23 ObjTouch() Signal to LRU(-like) facilities |
71 |
|
* |
72 |
|
* 3->4 HSH_Snipe() kill if not in use |
73 |
|
* 3->4 HSH_Kill() make unavailable |
74 |
|
* |
75 |
|
* 234 ObjSlim() Release body storage (but retain attribute storage) |
76 |
|
* |
77 |
|
* 4->5 ObjFreeObj() disassociates stevedore |
78 |
|
* |
79 |
|
* 5->6 FREE_OBJ() ...in HSH_DerefObjCore() |
80 |
|
*/ |
81 |
|
|
82 |
|
#include "config.h" |
83 |
|
|
84 |
|
#include <stdlib.h> |
85 |
|
|
86 |
|
#include "cache_varnishd.h" |
87 |
|
#include "cache_obj.h" |
88 |
|
#include "cache_objhead.h" |
89 |
|
#include "vend.h" |
90 |
|
#include "storage/storage.h" |
91 |
|
|
92 |
|
static const struct obj_methods * |
93 |
162958 |
obj_getmethods(const struct objcore *oc) |
94 |
|
{ |
95 |
|
|
96 |
162958 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
97 |
162958 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC); |
98 |
162958 |
AN(oc->stobj->stevedore->methods); |
99 |
162958 |
return (oc->stobj->stevedore->methods); |
100 |
|
} |
101 |
|
|
102 |
|
/*
 * Allocate and initialize a busy-object-core (boc): mutex, condvar and
 * an initial reference held by the creator.  Aborts (AN) if allocation
 * fails; callers never see NULL.
 */
static struct boc *
obj_newboc(void)
{
	struct boc *boc;

	ALLOC_OBJ(boc, BOC_MAGIC);
	AN(boc);
	Lck_New(&boc->mtx, lck_busyobj);
	PTOK(pthread_cond_init(&boc->cond, NULL));
	/* the creator holds the first reference */
	boc->refcount = 1;
	return (boc);
}
114 |
|
|
115 |
|
/*
 * Tear down a boc and NULL the caller's pointer (TAKE_OBJ_NOTNULL).
 * Destroys the lock and condvar, releases the vary blob (free(NULL)
 * is a no-op) and frees the structure itself.
 */
static void
obj_deleteboc(struct boc **p)
{
	struct boc *boc;

	TAKE_OBJ_NOTNULL(boc, p, BOC_MAGIC);
	Lck_Delete(&boc->mtx);
	PTOK(pthread_cond_destroy(&boc->cond));
	free(boc->vary);
	FREE_OBJ(boc);
}
126 |
|
|
127 |
|
/*==================================================================== |
128 |
|
* ObjNew() |
129 |
|
* |
130 |
|
*/ |
131 |
|
|
132 |
|
/*
 * Create a new objcore (lifecycle phase 0->1, see file header comment).
 * No stevedore is associated yet; the objcore gets a fresh boc and its
 * last_lru timestamp starts out as NAN (never on an LRU list).
 * Aborts on allocation failure.
 */
struct objcore *
ObjNew(const struct worker *wrk)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	ALLOC_OBJ(oc, OBJCORE_MAGIC);
	AN(oc);
	wrk->stats->n_objectcore++;
	oc->last_lru = NAN;
	oc->boc = obj_newboc();

	return (oc);
}
147 |
|
|
148 |
|
/*==================================================================== |
149 |
|
* ObjDestroy() |
150 |
|
* |
151 |
|
*/ |
152 |
|
|
153 |
|
/*
 * Destroy an objcore and NULL the caller's pointer (phase 5->6).
 * Any boc still attached is deleted first; the per-worker objectcore
 * counter is decremented.
 */
void
ObjDestroy(const struct worker *wrk, struct objcore **p)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	TAKE_OBJ_NOTNULL(oc, p, OBJCORE_MAGIC);
	if (oc->boc != NULL)
		obj_deleteboc(&oc->boc);
	FREE_OBJ(oc);
	wrk->stats->n_objectcore--;
}
165 |
|
|
166 |
|
/*==================================================================== |
167 |
|
* ObjIterate() |
168 |
|
* |
169 |
|
*/ |
170 |
|
|
171 |
|
int |
172 |
2445 |
ObjIterate(struct worker *wrk, struct objcore *oc, |
173 |
|
void *priv, objiterate_f *func, int final) |
174 |
|
{ |
175 |
2445 |
const struct obj_methods *om = obj_getmethods(oc); |
176 |
|
|
177 |
2445 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
178 |
2445 |
AN(func); |
179 |
2445 |
AN(om->objiterator); |
180 |
2445 |
return (om->objiterator(wrk, oc, priv, func, final)); |
181 |
|
} |
182 |
|
|
183 |
|
/*==================================================================== |
184 |
|
* ObjVAI...(): Asynchronous Iteration |
185 |
|
* |
186 |
|
* |
187 |
|
* ObjVAIinit() returns an opaque handle, or NULL if not supported |
188 |
|
* |
189 |
|
* A VAI handle must not be used concurrently |
190 |
|
* |
191 |
|
* the vai_notify_cb(priv) will be called asynchronously by the storage |
192 |
|
* engine when a -EAGAIN / -ENOBUFS condition is over and ObjVAIlease() |
193 |
|
* can be called again. |
194 |
|
* |
195 |
|
* Note: |
196 |
|
* - the callback gets executed by an arbitrary thread |
197 |
|
* - WITH the boc mtx held |
198 |
|
* so it should never block and only do minimal work |
199 |
|
* |
200 |
|
* ObjVAIlease() fills the vscarab with leases. returns: |
201 |
|
* |
202 |
|
* -EAGAIN: nothing available at the moment, storage will notify, no use to |
203 |
|
* call again until notification |
204 |
|
* -ENOBUFS: caller needs to return leases, storage will notify |
205 |
|
* -EPIPE: BOS_FAILED for busy object |
206 |
|
* -(errno): other problem, fatal |
207 |
|
* |
208 |
|
* >= 0: number of viovs added (== scarab->capacity - scarab->used) |
209 |
|
* |
210 |
|
* struct vscarab: |
211 |
|
* |
212 |
|
* the leases can be used by the caller until returned with |
213 |
|
* ObjVAIreturn(). The storage guarantees that the lease member is a |
214 |
|
* multiple of 8 (that is, the lower three bits are zero). These can be |
215 |
|
* used by the caller between lease and return, but must be cleared to |
216 |
|
* zero before returning. |
217 |
|
* |
218 |
|
* ObjVAIbuffer() allocates temporary buffers, returns: |
219 |
|
* |
220 |
|
* -EAGAIN: allocation can not be fulfilled immediately, storage will notify, |
221 |
|
* no use to call again until notification |
222 |
|
* -EINVAL: size larger than UINT_MAX requested |
223 |
|
* -(errno): other problem, fatal |
224 |
|
* n: n > 0, number of viovs filled |
225 |
|
* |
226 |
|
* The struct vscarab is used on the way in and out: On the way in, the |
227 |
|
* iov.iov_len members contain the sizes the caller requests, all other |
228 |
|
* members of the struct viovs are expected to be zero initialized. |
229 |
|
* |
230 |
|
* The maximum size to be requested is UINT_MAX. |
231 |
|
* |
232 |
|
* ObjVAIbuffer() may return sizes larger than requested. The returned n |
233 |
|
* might be smaller than requested. |
234 |
|
* |
235 |
|
* ObjVAIreturn() returns leases collected in a struct vscaret |
236 |
|
* |
237 |
|
* it must be called with a vscaret, which holds an array of lease values |
238 |
|
* received via ObjVAIlease() or ObjVAIbuffer() when the caller can |
239 |
|
* guarantee that they are no longer accessed. |
240 |
|
* |
241 |
|
* ObjVAIreturn() may retain leases in the vscaret if the implementation |
242 |
|
* still requires them, iow, the vscaret might not be empty upon return. |
243 |
|
* |
244 |
|
* ObjVAIfini() finalized iteration |
245 |
|
* |
246 |
|
* it must be called when iteration is done, irrespective of error status |
247 |
|
*/ |
248 |
|
|
249 |
|
vai_hdl |
250 |
2478 |
ObjVAIinit(struct worker *wrk, struct objcore *oc, struct ws *ws, |
251 |
|
vai_notify_cb *cb, void *cb_priv) |
252 |
|
{ |
253 |
2478 |
const struct obj_methods *om = obj_getmethods(oc); |
254 |
|
|
255 |
2478 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
256 |
|
|
257 |
2478 |
if (om->vai_init == NULL) |
258 |
0 |
return (NULL); |
259 |
2478 |
return (om->vai_init(wrk, oc, ws, cb, cb_priv)); |
260 |
2478 |
} |
261 |
|
|
262 |
|
int |
263 |
6712 |
ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
264 |
|
{ |
265 |
6712 |
struct vai_hdl_preamble *vaip = vhdl; |
266 |
|
|
267 |
6712 |
AN(vaip); |
268 |
6712 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
269 |
6712 |
AN(vaip->vai_lease); |
270 |
6712 |
return (vaip->vai_lease(wrk, vhdl, scarab)); |
271 |
|
} |
272 |
|
|
273 |
|
int |
274 |
16 |
ObjVAIbuffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
275 |
|
{ |
276 |
16 |
struct vai_hdl_preamble *vaip = vhdl; |
277 |
|
|
278 |
16 |
AN(vaip); |
279 |
16 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
280 |
16 |
AN(vaip->vai_buffer); |
281 |
16 |
return (vaip->vai_buffer(wrk, vhdl, scarab)); |
282 |
|
} |
283 |
|
|
284 |
|
void |
285 |
3004 |
ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) |
286 |
|
{ |
287 |
3004 |
struct vai_hdl_preamble *vaip = vhdl; |
288 |
|
|
289 |
3004 |
AN(vaip); |
290 |
3004 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
291 |
3004 |
AN(vaip->vai_return); |
292 |
3004 |
vaip->vai_return(wrk, vhdl, scaret); |
293 |
3004 |
} |
294 |
|
|
295 |
|
void |
296 |
2477 |
ObjVAIfini(struct worker *wrk, vai_hdl *vhdlp) |
297 |
|
{ |
298 |
2477 |
AN(vhdlp); |
299 |
2477 |
struct vai_hdl_preamble *vaip = *vhdlp; |
300 |
|
|
301 |
2477 |
AN(vaip); |
302 |
2477 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
303 |
2477 |
AN(vaip->vai_lease); |
304 |
2477 |
vaip->vai_fini(wrk, vhdlp); |
305 |
2477 |
} |
306 |
|
|
307 |
|
/*==================================================================== |
308 |
|
* ObjGetSpace() |
309 |
|
* |
310 |
|
* This function returns a pointer and length of free space. If there |
311 |
|
* is no free space, some will be added first. |
312 |
|
* |
313 |
|
* The "sz" argument is an input hint of how much space is desired. |
314 |
|
* 0 means "unknown", return some default size (maybe fetch_chunksize) |
315 |
|
*/ |
316 |
|
|
317 |
|
int |
318 |
58065 |
ObjGetSpace(struct worker *wrk, struct objcore *oc, ssize_t *sz, uint8_t **ptr) |
319 |
|
{ |
320 |
58065 |
const struct obj_methods *om = obj_getmethods(oc); |
321 |
|
|
322 |
58065 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
323 |
58065 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
324 |
58065 |
AN(sz); |
325 |
58065 |
AN(ptr); |
326 |
58065 |
assert(*sz >= 0); |
327 |
|
|
328 |
58065 |
AN(om->objgetspace); |
329 |
58065 |
return (om->objgetspace(wrk, oc, sz, ptr)); |
330 |
|
} |
331 |
|
|
332 |
|
/*==================================================================== |
333 |
|
* ObjExtend() |
334 |
|
* |
335 |
|
* This function extends the used part of the object a number of bytes |
336 |
|
* into the last space returned by ObjGetSpace() |
337 |
|
* |
338 |
|
* The final flag must be set on the last call, and it will release any |
339 |
|
* surplus space allocated. |
340 |
|
*/ |
341 |
|
|
342 |
|
/*
 * Transit-buffer flow control: with boc->mtx held, block the fetch
 * thread until the client has consumed enough that the amount fetched
 * but not yet delivered fits in the transit buffer, or until the
 * request is cancelled.  No-op unless a transit buffer is configured
 * (which only happens for transient objects, hence the assert).
 */
static void
obj_extend_condwait(const struct objcore *oc)
{

	if (oc->boc->transit_buffer == 0)
		return;

	assert(oc->flags & OC_F_TRANSIENT);
	while (!(oc->flags & OC_F_CANCEL) && oc->boc->fetched_so_far >
	    oc->boc->delivered_so_far + oc->boc->transit_buffer)
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
}
354 |
|
|
355 |
|
// notify of an extension of the boc or state change |
356 |
|
|
357 |
|
// notify of an extension of the boc or state change

/*
 * Called with boc->mtx held: wake synchronous waiters via broadcast,
 * then detach the entire VAI notification queue and invoke each
 * callback once.  Entries are unlinked and their INQUEUE flag cleared
 * before the callback runs, so a callback may immediately re-register.
 * NB: callbacks execute with the boc mutex held and must not block.
 */
static void
obj_boc_notify(struct boc *boc)
{
	struct vai_qe *qe, *next;

	PTOK(pthread_cond_broadcast(&boc->cond));
	/* steal the whole queue, then walk it */
	qe = VSLIST_FIRST(&boc->vai_q_head);
	VSLIST_FIRST(&boc->vai_q_head) = NULL;
	while (qe != NULL) {
		CHECK_OBJ(qe, VAI_Q_MAGIC);
		AN(qe->flags & VAI_QF_INQUEUE);
		qe->flags &= ~VAI_QF_INQUEUE;
		/* save the link before the callback can reuse the entry */
		next = VSLIST_NEXT(qe, list);
		VSLIST_NEXT(qe, list) = NULL;
		qe->cb(qe->hdl, qe->priv);
		qe = next;
	}
}
375 |
|
|
376 |
|
/*
 * Commit l bytes into the space last returned by ObjGetSpace() and
 * notify readers.  Honors transit-buffer flow control before the
 * extension, updates transit statistics, and on the final call lets
 * the stevedore trim surplus allocated storage.
 */
void
ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final)
{
	const struct obj_methods *om = obj_getmethods(oc);

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(om->objextend);
	assert(l >= 0);

	if (l > 0) {
		Lck_Lock(&oc->boc->mtx);
		/* may block until the client catches up (transit buffer) */
		obj_extend_condwait(oc);
		om->objextend(wrk, oc, l);
		oc->boc->fetched_so_far += l;
		/* wake waiters and run VAI callbacks under the lock */
		obj_boc_notify(oc->boc);
		Lck_Unlock(&oc->boc->mtx);

		if (oc->boc->transit_buffer > 0)
			wrk->stats->transit_buffered += l;
		else if (oc->flags & OC_F_TRANSIENT)
			wrk->stats->transit_stored += l;
	}

	assert(oc->boc->state < BOS_FINISHED);
	if (final && om->objtrimstore != NULL)
		om->objtrimstore(wrk, oc);
}
404 |
|
|
405 |
|
/*==================================================================== |
406 |
|
*/ |
407 |
|
|
408 |
|
/*
 * With boc->mtx held: publish the client's delivery position l so the
 * fetch side can resume when transit-buffer flow control applies.
 * No-op when no transit buffer is configured.
 */
static inline void
objSignalFetchLocked(const struct objcore *oc, uint64_t l)
{
	if (oc->boc->transit_buffer > 0) {
		assert(oc->flags & OC_F_TRANSIENT);
		/* Signal the new client position */
		oc->boc->delivered_so_far = l;
		PTOK(pthread_cond_signal(&oc->boc->cond));
	}
}
418 |
|
|
419 |
|
/*
 * Block until the object body has grown past position l or the boc has
 * reached a terminal state (BOS_FINISHED/BOS_FAILED).  Returns the
 * current fetched length; the boc state at wakeup is reported through
 * *statep when non-NULL.  Used to implement ObjIterate() streaming.
 */
uint64_t
ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep)
{
	enum boc_state_e state;
	uint64_t rv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	while (1) {
		rv = oc->boc->fetched_so_far;
		/* the body can only shrink if the fetch failed */
		assert(l <= rv || oc->boc->state == BOS_FAILED);
		state = oc->boc->state;
		/* let the fetch side know how far we got */
		objSignalFetchLocked(oc, l);
		if (rv > l || state >= BOS_FINISHED)
			break;
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
444 |
|
|
445 |
|
// get a new extension _or_ register a notification |
446 |
|
// get a new extension _or_ register a notification
/*
 * Non-blocking counterpart of ObjWaitExtend(): return the current
 * fetched length, and if nothing new is available and the boc is not
 * terminal, enqueue qe so its callback fires on the next extension or
 * state change.  qe is only queued once (VAI_QF_INQUEUE guard).
 */
uint64_t
ObjVAIGetExtend(struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep, struct vai_qe *qe)
{
	enum boc_state_e state;
	uint64_t rv;

	(void) wrk;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	rv = oc->boc->fetched_so_far;
	assert(l <= rv || oc->boc->state == BOS_FAILED);
	state = oc->boc->state;
	/* let the fetch side know how far the client got */
	objSignalFetchLocked(oc, l);
	/* no progress possible right now: arrange a callback */
	if (l == rv && state < BOS_FINISHED &&
	    (qe->flags & VAI_QF_INQUEUE) == 0) {
		qe->flags |= VAI_QF_INQUEUE;
		VSLIST_INSERT_HEAD(&oc->boc->vai_q_head, qe, list);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
472 |
|
|
473 |
|
/*
 * Deregister a pending VAI notification entry.  Safe to call whether
 * or not the entry is currently queued; clears all qe flags.
 */
void
ObjVAICancel(struct worker *wrk, struct boc *boc, struct vai_qe *qe)
{

	(void) wrk;
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);

	Lck_Lock(&boc->mtx);
	// inefficient, but should be rare
	if ((qe->flags & VAI_QF_INQUEUE) != 0)
		VSLIST_REMOVE(&boc->vai_q_head, qe, vai_qe, list);
	qe->flags = 0;
	Lck_Unlock(&boc->mtx);
}
488 |
|
|
489 |
|
/*==================================================================== |
490 |
|
*/ |
491 |
|
|
492 |
|
/*
 * Advance the boc state machine (INVALID->REQ_DONE->STREAM->FINISHED/
 * FAILED); transitions are strictly monotonic (asserted).  Side
 * effects happen before the state is published: the stevedore gets an
 * optional hook, a transition to FAILED fails the hash entry, and the
 * first transition to >= STREAM unbusies it.  With broadcast set,
 * waiters and VAI subscribers are notified under the boc lock.
 */
void
ObjSetState(struct worker *wrk, struct objcore *oc, enum boc_state_e next,
    unsigned broadcast)
{
	const struct obj_methods *om;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	assert(next > oc->boc->state);

	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	/* an object cannot finish without a committed length */
	assert(next != BOS_FINISHED || (oc->oa_present & (1 << OA_LEN)));

	if (oc->stobj->stevedore != NULL) {
		om = oc->stobj->stevedore->methods;
		if (om->objsetstate != NULL)
			om->objsetstate(wrk, oc, next);
	}

	if (next == BOS_FAILED)
		HSH_Fail(wrk, oc);
	else if (oc->boc->state < BOS_STREAM && next >= BOS_STREAM)
		HSH_Unbusy(wrk, oc);

	Lck_Lock(&oc->boc->mtx);
	oc->boc->state = next;
	if (broadcast)
		obj_boc_notify(oc->boc);
	Lck_Unlock(&oc->boc->mtx);
}
522 |
|
|
523 |
|
/*==================================================================== |
524 |
|
*/ |
525 |
|
|
526 |
|
void |
527 |
2642 |
ObjWaitState(const struct objcore *oc, enum boc_state_e want) |
528 |
|
{ |
529 |
|
|
530 |
2642 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
531 |
2642 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
532 |
|
|
533 |
2642 |
Lck_Lock(&oc->boc->mtx); |
534 |
|
/* wake up obj_extend_condwait() */ |
535 |
2642 |
if (oc->flags & OC_F_CANCEL) |
536 |
354 |
PTOK(pthread_cond_signal(&oc->boc->cond)); |
537 |
29032 |
while (1) { |
538 |
29032 |
if (oc->boc->state >= want) |
539 |
2642 |
break; |
540 |
26390 |
(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx); |
541 |
|
} |
542 |
2642 |
Lck_Unlock(&oc->boc->mtx); |
543 |
2642 |
} |
544 |
|
|
545 |
|
/*==================================================================== |
546 |
|
* ObjGetlen() |
547 |
|
* |
548 |
|
* This is a separate function because it may need locking |
549 |
|
*/ |
550 |
|
|
551 |
|
uint64_t |
552 |
5437 |
ObjGetLen(struct worker *wrk, struct objcore *oc) |
553 |
|
{ |
554 |
|
uint64_t len; |
555 |
|
|
556 |
5437 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
557 |
|
|
558 |
5437 |
AZ(ObjGetU64(wrk, oc, OA_LEN, &len)); |
559 |
5437 |
return (len); |
560 |
|
} |
561 |
|
|
562 |
|
/*==================================================================== |
563 |
|
* ObjSlim() |
564 |
|
* |
565 |
|
* Free the whatever storage can be freed, without freeing the actual |
566 |
|
* object yet. |
567 |
|
*/ |
568 |
|
|
569 |
|
void |
570 |
1410 |
ObjSlim(struct worker *wrk, struct objcore *oc) |
571 |
|
{ |
572 |
1410 |
const struct obj_methods *om = obj_getmethods(oc); |
573 |
|
|
574 |
1410 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
575 |
|
|
576 |
1410 |
if (om->objslim != NULL) |
577 |
1410 |
om->objslim(wrk, oc); |
578 |
1410 |
} |
579 |
|
|
580 |
|
/*==================================================================== |
581 |
|
* Called when the boc used to populate the objcore is going away. |
582 |
|
* Useful for releasing any leftovers from Trim. |
583 |
|
*/ |
584 |
|
|
585 |
|
/*
 * Phase 2->3: the boc used to populate the objcore is going away.
 * Gives the stevedore (if one is already associated) a chance to
 * release fetch-time leftovers, then deletes the boc and NULLs *boc.
 */
void
ObjBocDone(struct worker *wrk, struct objcore *oc, struct boc **boc)
{
	const struct obj_methods *m;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(boc);
	CHECK_OBJ_NOTNULL(*boc, BOC_MAGIC);
	/* a failed fetch may never have associated a stevedore */
	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	if (oc->stobj->stevedore != NULL) {
		m = obj_getmethods(oc);
		if (m->objbocdone != NULL)
			m->objbocdone(wrk, oc, *boc);
	}
	obj_deleteboc(boc);
}
602 |
|
|
603 |
|
/*==================================================================== |
604 |
|
*/ |
605 |
|
void |
606 |
1811 |
ObjFreeObj(struct worker *wrk, struct objcore *oc) |
607 |
|
{ |
608 |
1811 |
const struct obj_methods *m = obj_getmethods(oc); |
609 |
|
|
610 |
1811 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
611 |
|
|
612 |
1811 |
AN(m->objfree); |
613 |
1811 |
m->objfree(wrk, oc); |
614 |
1811 |
AZ(oc->stobj->stevedore); |
615 |
1811 |
} |
616 |
|
|
617 |
|
/*==================================================================== |
618 |
|
* ObjHasAttr() |
619 |
|
* |
620 |
|
* Check if object has this attribute |
621 |
|
*/ |
622 |
|
|
623 |
|
int |
624 |
7879 |
ObjHasAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr) |
625 |
|
{ |
626 |
|
|
627 |
7879 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
628 |
7879 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
629 |
|
|
630 |
7879 |
if (oc->oa_present) |
631 |
7858 |
return (oc->oa_present & (1 << attr)); |
632 |
|
|
633 |
|
/* resurrected persistent objects don't have oa_present set */ |
634 |
21 |
return (ObjGetAttr(wrk, oc, attr, NULL) != NULL ? 1 : 0); |
635 |
7879 |
} |
636 |
|
|
637 |
|
/*==================================================================== |
638 |
|
* ObjGetAttr() |
639 |
|
* |
640 |
|
* Get an attribute of the object. |
641 |
|
* |
642 |
|
* Returns NULL on unset or zero length attributes and len set to |
643 |
|
* zero. Returns Non-NULL otherwise and len is updated with the attributes |
644 |
|
* length. |
645 |
|
*/ |
646 |
|
|
647 |
|
const void * |
648 |
22614 |
ObjGetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
649 |
|
ssize_t *len) |
650 |
|
{ |
651 |
22614 |
const struct obj_methods *om = obj_getmethods(oc); |
652 |
|
|
653 |
22614 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
654 |
|
|
655 |
22614 |
AN(om->objgetattr); |
656 |
22614 |
return (om->objgetattr(wrk, oc, attr, len)); |
657 |
|
} |
658 |
|
|
659 |
|
/*==================================================================== |
660 |
|
* ObjSetAttr() |
661 |
|
* |
662 |
|
* Setting fixed size attributes always succeeds. |
663 |
|
* |
664 |
|
* Setting a variable size attribute asserts if the combined size of the |
665 |
|
* variable attributes exceeds the total variable attribute space set at |
666 |
|
* object creation. If there is space it always succeeds. |
667 |
|
* |
668 |
|
* Setting an auxiliary attribute can fail. |
669 |
|
* |
670 |
|
* Resetting any variable asserts if the new length does not match the |
671 |
|
* previous length exactly. |
672 |
|
* |
673 |
|
* If ptr is Non-NULL, it points to the new content which is copied into |
674 |
|
* the attribute. Otherwise the caller will have to do the copying. |
675 |
|
* |
676 |
|
* Return value is non-NULL on success and NULL on failure. If ptr was |
677 |
|
* non-NULL, it is an error to use the returned pointer to set the |
678 |
|
* attribute data, it is only a success indicator in that case. |
679 |
|
*/ |
680 |
|
|
681 |
|
void * |
682 |
10700 |
ObjSetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
683 |
|
ssize_t len, const void *ptr) |
684 |
|
{ |
685 |
10700 |
const struct obj_methods *om = obj_getmethods(oc); |
686 |
|
void *r; |
687 |
|
|
688 |
10700 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
689 |
10700 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
690 |
|
|
691 |
10700 |
AN(om->objsetattr); |
692 |
10700 |
assert((int)attr < 16); |
693 |
10700 |
r = om->objsetattr(wrk, oc, attr, len, ptr); |
694 |
10700 |
if (r) |
695 |
10700 |
oc->oa_present |= (1 << attr); |
696 |
10700 |
return (r); |
697 |
|
} |
698 |
|
|
699 |
|
/*==================================================================== |
700 |
|
* ObjTouch() |
701 |
|
*/ |
702 |
|
|
703 |
|
void |
704 |
3296 |
ObjTouch(struct worker *wrk, struct objcore *oc, vtim_real now) |
705 |
|
{ |
706 |
3296 |
const struct obj_methods *om = obj_getmethods(oc); |
707 |
|
|
708 |
3296 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
709 |
3296 |
if (om->objtouch != NULL) |
710 |
3296 |
om->objtouch(wrk, oc, now); |
711 |
3296 |
} |
712 |
|
|
713 |
|
/*==================================================================== |
714 |
|
* Utility functions which work on top of the previous ones |
715 |
|
*/ |
716 |
|
|
717 |
|
int |
718 |
59 |
ObjCopyAttr(struct worker *wrk, struct objcore *oc, struct objcore *ocs, |
719 |
|
enum obj_attr attr) |
720 |
|
{ |
721 |
|
const void *vps; |
722 |
|
void *vpd; |
723 |
|
ssize_t l; |
724 |
|
|
725 |
59 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
726 |
59 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
727 |
59 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
728 |
59 |
CHECK_OBJ_NOTNULL(ocs, OBJCORE_MAGIC); |
729 |
|
|
730 |
59 |
vps = ObjGetAttr(wrk, ocs, attr, &l); |
731 |
|
// XXX: later we want to have zero-length OA's too |
732 |
59 |
if (vps == NULL || l <= 0) |
733 |
0 |
return (-1); |
734 |
59 |
vpd = ObjSetAttr(wrk, oc, attr, l, vps); |
735 |
59 |
if (vpd == NULL) |
736 |
0 |
return (-1); |
737 |
59 |
return (0); |
738 |
59 |
} |
739 |
|
|
740 |
|
int |
741 |
2244 |
ObjSetXID(struct worker *wrk, struct objcore *oc, vxid_t xid) |
742 |
|
{ |
743 |
|
uint64_t u; |
744 |
|
|
745 |
2244 |
u = VXID(xid); |
746 |
2244 |
AZ(ObjSetU64(wrk, oc, OA_VXID, u)); |
747 |
2244 |
return (0); |
748 |
|
} |
749 |
|
|
750 |
|
|
751 |
|
vxid_t |
752 |
2946 |
ObjGetXID(struct worker *wrk, struct objcore *oc) |
753 |
|
{ |
754 |
|
vxid_t u; |
755 |
|
|
756 |
2946 |
AZ(ObjGetU64(wrk, oc, OA_VXID, &u.vxid)); |
757 |
2946 |
return (u); |
758 |
|
} |
759 |
|
|
760 |
|
/*-------------------------------------------------------------------- |
761 |
|
* There is no well-defined byteorder for IEEE-754 double and the |
762 |
|
* correct solution (frexp(3) and manual encoding) is more work |
763 |
|
* than our (weak) goal of being endian-agnostic requires at this point. |
764 |
|
* We give it a shot by memcpy'ing doubles over a uint64_t and then |
765 |
|
* BE encode that. |
766 |
|
*/ |
767 |
|
|
768 |
|
int |
769 |
2244 |
ObjSetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double t) |
770 |
|
{ |
771 |
|
void *vp; |
772 |
|
uint64_t u; |
773 |
|
|
774 |
2244 |
assert(sizeof t == sizeof u); |
775 |
2244 |
memcpy(&u, &t, sizeof u); |
776 |
2244 |
vp = ObjSetAttr(wrk, oc, a, sizeof u, NULL); |
777 |
2244 |
if (vp == NULL) |
778 |
0 |
return (-1); |
779 |
2244 |
vbe64enc(vp, u); |
780 |
2244 |
return (0); |
781 |
2244 |
} |
782 |
|
|
783 |
|
int |
784 |
2 |
ObjGetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double *d) |
785 |
|
{ |
786 |
|
const void *vp; |
787 |
|
uint64_t u; |
788 |
|
ssize_t l; |
789 |
|
|
790 |
2 |
assert(sizeof *d == sizeof u); |
791 |
2 |
vp = ObjGetAttr(wrk, oc, a, &l); |
792 |
2 |
if (vp == NULL) |
793 |
0 |
return (-1); |
794 |
2 |
if (d != NULL) { |
795 |
2 |
assert(l == sizeof u); |
796 |
2 |
u = vbe64dec(vp); |
797 |
2 |
memcpy(d, &u, sizeof *d); |
798 |
2 |
} |
799 |
2 |
return (0); |
800 |
2 |
} |
801 |
|
|
802 |
|
/*-------------------------------------------------------------------- |
803 |
|
*/ |
804 |
|
|
805 |
|
int |
806 |
4995 |
ObjSetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t t) |
807 |
|
{ |
808 |
|
void *vp; |
809 |
|
|
810 |
4995 |
vp = ObjSetAttr(wrk, oc, a, sizeof t, NULL); |
811 |
4995 |
if (vp == NULL) |
812 |
0 |
return (-1); |
813 |
4995 |
vbe64enc(vp, t); |
814 |
4995 |
return (0); |
815 |
4995 |
} |
816 |
|
|
817 |
|
int |
818 |
8387 |
ObjGetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t *d) |
819 |
|
{ |
820 |
|
const void *vp; |
821 |
|
ssize_t l; |
822 |
|
|
823 |
8387 |
vp = ObjGetAttr(wrk, oc, a, &l); |
824 |
8387 |
if (vp == NULL || l != sizeof *d) |
825 |
2 |
return (-1); |
826 |
8387 |
if (d != NULL) |
827 |
8386 |
*d = vbe64dec(vp); |
828 |
8387 |
return (0); |
829 |
8385 |
} |
830 |
|
|
831 |
|
/*-------------------------------------------------------------------- |
832 |
|
*/ |
833 |
|
|
834 |
|
int |
835 |
7342 |
ObjCheckFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of) |
836 |
|
{ |
837 |
|
const uint8_t *fp; |
838 |
|
|
839 |
7342 |
fp = ObjGetAttr(wrk, oc, OA_FLAGS, NULL); |
840 |
7342 |
AN(fp); |
841 |
7342 |
return ((*fp) & of); |
842 |
|
} |
843 |
|
|
844 |
|
void |
845 |
495 |
ObjSetFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of, int val) |
846 |
|
{ |
847 |
|
uint8_t *fp; |
848 |
|
|
849 |
495 |
fp = ObjSetAttr(wrk, oc, OA_FLAGS, 1, NULL); |
850 |
495 |
AN(fp); |
851 |
495 |
if (val) |
852 |
493 |
(*fp) |= of; |
853 |
|
else |
854 |
2 |
(*fp) &= ~of; |
855 |
495 |
} |
856 |
|
|
857 |
|
/*==================================================================== |
858 |
|
* Object event subscription mechanism. |
859 |
|
* |
860 |
|
* XXX: it is extremely unclear what the locking circumstances are here. |
861 |
|
*/ |
862 |
|
|
863 |
|
/* One registered object-event subscriber. */
struct oev_entry {
	unsigned			magic;
#define OEV_MAGIC			0xb0b7c5a1
	unsigned			mask;	/* OEV_* events of interest */
	obj_event_f			*func;	/* subscriber callback */
	void				*priv;	/* opaque callback argument */
	VTAILQ_ENTRY(oev_entry)		list;
};

/* Subscriber list, its rwlock, and the union of all subscribed masks
 * (oev_mask lets ObjSendEvent() skip the lock when nobody cares). */
static VTAILQ_HEAD(,oev_entry)		oev_list;
static pthread_rwlock_t			oev_rwl;
static unsigned				oev_mask;
875 |
|
|
876 |
|
/* |
877 |
|
* NB: ObjSubscribeEvents() is not atomic: |
878 |
|
* oev_mask is checked optimistically in ObjSendEvent() |
879 |
|
*/ |
880 |
|
/*
 * Register a callback for the object events in mask (subset of
 * OEV_MASK, asserted).  Returns an opaque handle for later
 * ObjUnsubscribeEvents().  Aborts on allocation failure.
 */
uintptr_t
ObjSubscribeEvents(obj_event_f *func, void *priv, unsigned mask)
{
	struct oev_entry *oev;

	AN(func);
	AZ(mask & ~OEV_MASK);

	ALLOC_OBJ(oev, OEV_MAGIC);
	AN(oev);
	oev->func = func;
	oev->priv = priv;
	oev->mask = mask;
	PTOK(pthread_rwlock_wrlock(&oev_rwl));
	VTAILQ_INSERT_TAIL(&oev_list, oev, list);
	oev_mask |= mask;
	PTOK(pthread_rwlock_unlock(&oev_rwl));
	return ((uintptr_t)oev);
}
899 |
|
|
900 |
|
/*
 * Remove a subscription by the handle ObjSubscribeEvents() returned,
 * recompute the union mask from the remaining entries, free the entry
 * and zero *handle.  Asserts if the handle is not registered.
 */
void
ObjUnsubscribeEvents(uintptr_t *handle)
{
	struct oev_entry *oev, *oev2 = NULL;
	unsigned newmask = 0;

	AN(handle);
	AN(*handle);
	PTOK(pthread_rwlock_wrlock(&oev_rwl));
	VTAILQ_FOREACH(oev, &oev_list, list) {
		CHECK_OBJ_NOTNULL(oev, OEV_MAGIC);
		if ((uintptr_t)oev == *handle)
			oev2 = oev;
		else
			newmask |= oev->mask;
	}
	AN(oev2);
	VTAILQ_REMOVE(&oev_list, oev2, list);
	oev_mask = newmask;
	AZ(newmask & ~OEV_MASK);
	PTOK(pthread_rwlock_unlock(&oev_rwl));
	FREE_OBJ(oev2);
	*handle = 0;
}
924 |
|
|
925 |
|
/*
 * Deliver a single object event to every subscriber whose mask
 * matches.  The unlocked oev_mask peek is an optimistic fast path
 * (ObjSubscribeEvents() is documented as not atomic with it); the
 * subscriber list itself is only walked under the read lock.
 */
void
ObjSendEvent(struct worker *wrk, struct objcore *oc, unsigned event)
{
	struct oev_entry *oev;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	/* exactly one known event bit */
	AN(event & OEV_MASK);
	AZ(event & ~OEV_MASK);
	if (!(event & oev_mask))
		return;

	PTOK(pthread_rwlock_rdlock(&oev_rwl));
	VTAILQ_FOREACH(oev, &oev_list, list) {
		CHECK_OBJ_NOTNULL(oev, OEV_MAGIC);
		if (event & oev->mask)
			oev->func(wrk, oev->priv, oc, event);
	}
	PTOK(pthread_rwlock_unlock(&oev_rwl));
}
946 |
|
|
947 |
|
/*
 * One-time initialization of the object-event subscription machinery.
 */
void
ObjInit(void)
{
	VTAILQ_INIT(&oev_list);
	PTOK(pthread_rwlock_init(&oev_rwl, NULL));
}