| | varnish-cache/bin/varnishd/cache/cache_obj.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2013-2016 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* Lifetime of an objcore: |
30 |
|
* phase 0 - nonexistent |
31 |
|
* phase 1 - created, but no stevedore associated |
32 |
|
* phase 2 - stevedore associated, being filled out |
33 |
|
* phase 3 - stable, no changes happening |
34 |
|
* phase 4 - unavailable, being dismantled |
35 |
|
* phase 5 - stevedore disassociated |
36 |
|
* phase 6 - nonexistent |
37 |
|
* |
38 |
|
* 0->1 ObjNew() creates objcore |
39 |
|
* |
40 |
|
* 1->2 STV_NewObject() associates a stevedore |
41 |
|
* |
42 |
|
* 2 ObjSetState() sets state |
43 |
|
* 2 ObjWaitState() waits for particular state |
44 |
|
* INVALID->REQ_DONE->STREAM->FINISHED->FAILED |
45 |
|
* |
46 |
|
* 2 ObjGetSpace() allocates space |
47 |
|
* 2 ObjExtend() commits content |
48 |
|
* 2 ObjWaitExtend() waits for content - used to implement ObjIterate()) |
49 |
|
* |
50 |
|
* 2 ObjSetAttr() |
51 |
|
* 2 ObjCopyAttr() |
52 |
|
* 2 ObjSetFlag() |
53 |
|
* 2 ObjSetDouble() |
54 |
|
* 2 ObjSetU32() |
55 |
|
* 2 ObjSetU64() |
56 |
|
* |
57 |
|
* 2->3 ObjBocDone() Boc removed from OC, clean it up |
58 |
|
* |
59 |
|
* 23 ObjHasAttr() |
60 |
|
* 23 ObjGetAttr() |
61 |
|
* 23 ObjCheckFlag() |
62 |
|
* 23 ObjGetDouble() |
63 |
|
* 23 ObjGetU32() |
64 |
|
* 23 ObjGetU64() |
65 |
|
* 23 ObjGetLen() |
66 |
|
* 23 ObjGetXID() |
67 |
|
* |
68 |
|
* 23 ObjIterate() ... over body |
69 |
|
* |
70 |
|
* 23 ObjTouch() Signal to LRU(-like) facilities |
71 |
|
* |
72 |
|
* 3->4 HSH_Snipe() kill if not in use |
73 |
|
* 3->4 HSH_Kill() make unavailable |
74 |
|
* |
75 |
|
* 234 ObjSlim() Release body storage (but retain attribute storage) |
76 |
|
* |
77 |
|
* 4->5 ObjFreeObj() disassociates stevedore |
78 |
|
* |
79 |
|
* 5->6 FREE_OBJ() ...in HSH_DerefObjCore() |
80 |
|
*/ |
81 |
|
|
82 |
|
#include "config.h" |
83 |
|
|
84 |
|
#include <stdlib.h> |
85 |
|
|
86 |
|
#include "cache_varnishd.h" |
87 |
|
#include "cache_obj.h" |
88 |
|
#include "vend.h" |
89 |
|
#include "storage/storage.h" |
90 |
|
|
91 |
|
static const struct obj_methods * |
92 |
162422 |
obj_getmethods(const struct objcore *oc) |
93 |
|
{ |
94 |
|
|
95 |
162422 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
96 |
162422 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC); |
97 |
162422 |
AN(oc->stobj->stevedore->methods); |
98 |
162422 |
return (oc->stobj->stevedore->methods); |
99 |
|
} |
100 |
|
|
101 |
|
static struct boc * |
102 |
3213 |
obj_newboc(void) |
103 |
|
{ |
104 |
|
struct boc *boc; |
105 |
|
|
106 |
3213 |
ALLOC_OBJ(boc, BOC_MAGIC); |
107 |
3213 |
AN(boc); |
108 |
3213 |
Lck_New(&boc->mtx, lck_busyobj); |
109 |
3213 |
PTOK(pthread_cond_init(&boc->cond, NULL)); |
110 |
3213 |
boc->refcount = 1; |
111 |
3213 |
return (boc); |
112 |
|
} |
113 |
|
|
114 |
|
static void |
115 |
2928 |
obj_deleteboc(struct boc **p) |
116 |
|
{ |
117 |
|
struct boc *boc; |
118 |
|
|
119 |
2928 |
TAKE_OBJ_NOTNULL(boc, p, BOC_MAGIC); |
120 |
2928 |
Lck_Delete(&boc->mtx); |
121 |
2928 |
PTOK(pthread_cond_destroy(&boc->cond)); |
122 |
2928 |
free(boc->vary); |
123 |
2928 |
FREE_OBJ(boc); |
124 |
2928 |
} |
125 |
|
|
126 |
|
/*==================================================================== |
127 |
|
* ObjNew() |
128 |
|
* |
129 |
|
*/ |
130 |
|
|
131 |
|
struct objcore * |
132 |
3213 |
ObjNew(const struct worker *wrk) |
133 |
|
{ |
134 |
|
struct objcore *oc; |
135 |
|
|
136 |
3213 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
137 |
|
|
138 |
3213 |
ALLOC_OBJ(oc, OBJCORE_MAGIC); |
139 |
3213 |
AN(oc); |
140 |
3213 |
wrk->stats->n_objectcore++; |
141 |
3213 |
oc->last_lru = NAN; |
142 |
3213 |
oc->flags = OC_F_BUSY; |
143 |
|
|
144 |
3213 |
oc->boc = obj_newboc(); |
145 |
|
|
146 |
3213 |
return (oc); |
147 |
|
} |
148 |
|
|
149 |
|
/*==================================================================== |
150 |
|
* ObjDestroy() |
151 |
|
* |
152 |
|
*/ |
153 |
|
|
154 |
|
void |
155 |
1863 |
ObjDestroy(const struct worker *wrk, struct objcore **p) |
156 |
|
{ |
157 |
|
struct objcore *oc; |
158 |
|
|
159 |
1863 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
160 |
1863 |
TAKE_OBJ_NOTNULL(oc, p, OBJCORE_MAGIC); |
161 |
1863 |
if (oc->boc != NULL) |
162 |
35 |
obj_deleteboc(&oc->boc); |
163 |
1863 |
FREE_OBJ(oc); |
164 |
1863 |
wrk->stats->n_objectcore--; |
165 |
1863 |
} |
166 |
|
|
167 |
|
/*==================================================================== |
168 |
|
* ObjIterate() |
169 |
|
* |
170 |
|
*/ |
171 |
|
|
172 |
|
int |
173 |
2418 |
ObjIterate(struct worker *wrk, struct objcore *oc, |
174 |
|
void *priv, objiterate_f *func, int final) |
175 |
|
{ |
176 |
2418 |
const struct obj_methods *om = obj_getmethods(oc); |
177 |
|
|
178 |
2418 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
179 |
2418 |
AN(func); |
180 |
2418 |
AN(om->objiterator); |
181 |
2418 |
return (om->objiterator(wrk, oc, priv, func, final)); |
182 |
|
} |
183 |
|
|
184 |
|
/*==================================================================== |
185 |
|
* ObjVAI...(): Asynchronous Iteration |
186 |
|
* |
187 |
|
* |
188 |
|
* ObjVAIinit() returns an opaque handle, or NULL if not supported |
189 |
|
* |
190 |
|
* A VAI handle must not be used concurrently |
191 |
|
* |
192 |
|
* the vai_notify_cb(priv) will be called asynchronously by the storage |
193 |
|
* engine when a -EAGAIN / -ENOBUFS condition is over and ObjVAIlease() |
194 |
|
* can be called again. |
195 |
|
* |
196 |
|
* Note: |
197 |
|
* - the callback gets executed by an arbitrary thread |
198 |
|
* - WITH the boc mtx held |
199 |
|
* so it should never block and only do minimal work |
200 |
|
* |
201 |
|
* ObjVAIlease() fills the vscarab with leases. returns: |
202 |
|
* |
203 |
|
* -EAGAIN: nothing available at the moment, storage will notify, no use to |
204 |
|
* call again until notification |
205 |
|
* -ENOBUFS: caller needs to return leases, storage will notify |
206 |
|
* -EPIPE: BOS_FAILED for busy object |
207 |
|
* -(errno): other problem, fatal |
208 |
|
* |
209 |
|
* >= 0: number of viovs added (== scarab->capacity - scarab->used) |
210 |
|
* |
211 |
|
* struct vscarab: |
212 |
|
* |
213 |
|
* the leases can be used by the caller until returned with |
214 |
|
* ObjVAIreturn(). The storage guarantees that the lease member is a |
215 |
|
* multiple of 8 (that is, the lower three bits are zero). These can be |
216 |
|
* used by the caller between lease and return, but must be cleared to |
217 |
|
* zero before returning. |
218 |
|
* |
219 |
|
* ObjVAIbuffer() allocates temporary buffers, returns: |
220 |
|
* |
221 |
|
* -EAGAIN: allocation can not be fulfilled immediately, storage will notify, |
222 |
|
* no use to call again until notification |
223 |
|
* -EINVAL: size larger than UINT_MAX requested |
224 |
|
* -(errno): other problem, fatal |
225 |
|
* n: n > 0, number of viovs filled |
226 |
|
* |
227 |
|
* The struct vscarab is used on the way in and out: On the way in, the |
228 |
|
* iov.iov_len members contain the sizes the caller requests, all other |
229 |
|
* members of the struct viovs are expected to be zero initialized. |
230 |
|
* |
231 |
|
* The maximum size to be requested is UINT_MAX. |
232 |
|
* |
233 |
|
* ObjVAIbuffer() may return sizes larger than requested. The returned n |
234 |
|
* might be smaller than requested. |
235 |
|
* |
236 |
|
* ObjVAIreturn() returns leases collected in a struct vscaret |
237 |
|
* |
238 |
|
* it must be called with a vscaret, which holds an array of lease values |
239 |
|
* received via ObjVAIlease() or ObjVAIbuffer() when the caller can |
240 |
|
* guarantee that they are no longer accessed. |
241 |
|
* |
242 |
|
* ObjVAIreturn() may retain leases in the vscaret if the implementation |
243 |
|
* still requires them, iow, the vscaret might not be empty upon return. |
244 |
|
* |
245 |
|
* ObjVAIfini() finalized iteration |
246 |
|
* |
247 |
|
* it must be called when iteration is done, irrespective of error status |
248 |
|
*/ |
249 |
|
|
250 |
|
vai_hdl |
251 |
2450 |
ObjVAIinit(struct worker *wrk, struct objcore *oc, struct ws *ws, |
252 |
|
vai_notify_cb *cb, void *cb_priv) |
253 |
|
{ |
254 |
2450 |
const struct obj_methods *om = obj_getmethods(oc); |
255 |
|
|
256 |
2450 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
257 |
|
|
258 |
2450 |
if (om->vai_init == NULL) |
259 |
0 |
return (NULL); |
260 |
2450 |
return (om->vai_init(wrk, oc, ws, cb, cb_priv)); |
261 |
2450 |
} |
262 |
|
|
263 |
|
int |
264 |
6636 |
ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
265 |
|
{ |
266 |
6636 |
struct vai_hdl_preamble *vaip = vhdl; |
267 |
|
|
268 |
6636 |
AN(vaip); |
269 |
6636 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
270 |
6636 |
AN(vaip->vai_lease); |
271 |
6636 |
return (vaip->vai_lease(wrk, vhdl, scarab)); |
272 |
|
} |
273 |
|
|
274 |
|
int |
275 |
16 |
ObjVAIbuffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
276 |
|
{ |
277 |
16 |
struct vai_hdl_preamble *vaip = vhdl; |
278 |
|
|
279 |
16 |
AN(vaip); |
280 |
16 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
281 |
16 |
AN(vaip->vai_buffer); |
282 |
16 |
return (vaip->vai_buffer(wrk, vhdl, scarab)); |
283 |
|
} |
284 |
|
|
285 |
|
void |
286 |
2966 |
ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) |
287 |
|
{ |
288 |
2966 |
struct vai_hdl_preamble *vaip = vhdl; |
289 |
|
|
290 |
2966 |
AN(vaip); |
291 |
2966 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
292 |
2966 |
AN(vaip->vai_return); |
293 |
2966 |
vaip->vai_return(wrk, vhdl, scaret); |
294 |
2966 |
} |
295 |
|
|
296 |
|
void |
297 |
2450 |
ObjVAIfini(struct worker *wrk, vai_hdl *vhdlp) |
298 |
|
{ |
299 |
2450 |
AN(vhdlp); |
300 |
2450 |
struct vai_hdl_preamble *vaip = *vhdlp; |
301 |
|
|
302 |
2450 |
AN(vaip); |
303 |
2450 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
304 |
2450 |
AN(vaip->vai_lease); |
305 |
2450 |
vaip->vai_fini(wrk, vhdlp); |
306 |
2450 |
} |
307 |
|
|
308 |
|
/*==================================================================== |
309 |
|
* ObjGetSpace() |
310 |
|
* |
311 |
|
* This function returns a pointer and length of free space. If there |
312 |
|
* is no free space, some will be added first. |
313 |
|
* |
314 |
|
* The "sz" argument is an input hint of how much space is desired. |
315 |
|
* 0 means "unknown", return some default size (maybe fetch_chunksize) |
316 |
|
*/ |
317 |
|
|
318 |
|
int |
319 |
58079 |
ObjGetSpace(struct worker *wrk, struct objcore *oc, ssize_t *sz, uint8_t **ptr) |
320 |
|
{ |
321 |
58079 |
const struct obj_methods *om = obj_getmethods(oc); |
322 |
|
|
323 |
58079 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
324 |
58079 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
325 |
58079 |
AN(sz); |
326 |
58079 |
AN(ptr); |
327 |
58079 |
assert(*sz >= 0); |
328 |
|
|
329 |
58079 |
AN(om->objgetspace); |
330 |
58079 |
return (om->objgetspace(wrk, oc, sz, ptr)); |
331 |
|
} |
332 |
|
|
333 |
|
/*==================================================================== |
334 |
|
* ObjExtend() |
335 |
|
* |
336 |
|
* This function extends the used part of the object a number of bytes |
337 |
|
* into the last space returned by ObjGetSpace() |
338 |
|
* |
339 |
|
* The final flag must be set on the last call, and it will release any |
340 |
|
* surplus space allocated. |
341 |
|
*/ |
342 |
|
|
343 |
|
/*
 * Transit-buffer flow control: when this (transient) object has a
 * transit_buffer limit, block the fetch until the client side has
 * consumed enough that fetched_so_far stays within transit_buffer
 * bytes of delivered_so_far, or until the fetch is cancelled
 * (OC_F_CANCEL, signalled from ObjWaitState()).
 *
 * Must be called with oc->boc->mtx held.
 */
static void
obj_extend_condwait(const struct objcore *oc)
{

	/* no limit configured: nothing to wait for */
	if (oc->boc->transit_buffer == 0)
		return;

	assert(oc->flags & OC_F_TRANSIENT);
	while (!(oc->flags & OC_F_CANCEL) && oc->boc->fetched_so_far >
	    oc->boc->delivered_so_far + oc->boc->transit_buffer)
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
}
355 |
|
|
356 |
|
// notify of an extension of the boc or state change |
357 |
|
|
358 |
|
/*
 * Wake all synchronous waiters on boc->cond and drain the asynchronous
 * VAI notification queue: the queue head is detached first, then each
 * entry has its INQUEUE flag cleared and its callback invoked.
 *
 * Must be called with boc->mtx held; callbacks therefore run under the
 * lock and must not block (see the VAI block comment above).
 */
static void
obj_boc_notify(struct boc *boc)
{
	struct vai_qe *qe, *next;

	PTOK(pthread_cond_broadcast(&boc->cond));
	/* detach the whole queue before walking it */
	qe = VSLIST_FIRST(&boc->vai_q_head);
	VSLIST_FIRST(&boc->vai_q_head) = NULL;
	while (qe != NULL) {
		CHECK_OBJ(qe, VAI_Q_MAGIC);
		AN(qe->flags & VAI_QF_INQUEUE);
		qe->flags &= ~VAI_QF_INQUEUE;
		/* save the successor: the callback may reuse qe */
		next = VSLIST_NEXT(qe, list);
		VSLIST_NEXT(qe, list) = NULL;
		qe->cb(qe->hdl, qe->priv);
		qe = next;
	}
}
376 |
|
|
377 |
|
/*
 * Commit l bytes of new content into the space last returned by
 * ObjGetSpace() and wake everybody waiting for body data.
 *
 * The final flag must be set on the last call; surplus allocated
 * space is then trimmed via the stevedore's objtrimstore hook.
 */
void
ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final)
{
	const struct obj_methods *om = obj_getmethods(oc);

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(om->objextend);
	assert(l >= 0);

	if (l > 0) {
		Lck_Lock(&oc->boc->mtx);
		/* apply transit-buffer flow control before committing */
		obj_extend_condwait(oc);
		om->objextend(wrk, oc, l);
		oc->boc->fetched_so_far += l;
		obj_boc_notify(oc->boc);
		Lck_Unlock(&oc->boc->mtx);

		/* stats accounting; transit_buffer is read unlocked here —
		 * NOTE(review): presumably stable for the boc lifetime */
		if (oc->boc->transit_buffer > 0)
			wrk->stats->transit_buffered += l;
		else if (oc->flags & OC_F_TRANSIENT)
			wrk->stats->transit_stored += l;
	}

	assert(oc->boc->state < BOS_FINISHED);
	if (final && om->objtrimstore != NULL)
		om->objtrimstore(wrk, oc);
}
405 |
|
|
406 |
|
/*==================================================================== |
407 |
|
*/ |
408 |
|
|
409 |
|
/*
 * Record the client delivery position l and, when transit buffering
 * is active, signal the condvar so obj_extend_condwait() can
 * re-evaluate whether the fetch may proceed.
 *
 * Must be called with oc->boc->mtx held.
 */
static inline void
objSignalFetchLocked(const struct objcore *oc, uint64_t l)
{
	if (oc->boc->transit_buffer > 0) {
		assert(oc->flags & OC_F_TRANSIENT);
		/* Signal the new client position */
		oc->boc->delivered_so_far = l;
		PTOK(pthread_cond_signal(&oc->boc->cond));
	}
}
419 |
|
|
420 |
|
/*
 * Block until the body has grown past l bytes or the busy object has
 * reached a final state.  Returns the fetched_so_far value observed
 * and, via *statep (if non-NULL), the boc state at that moment.
 *
 * Used to implement ObjIterate() for streaming delivery.
 */
uint64_t
ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep)
{
	enum boc_state_e state;
	uint64_t rv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	while (1) {
		rv = oc->boc->fetched_so_far;
		/* the caller can never be ahead of the fetch unless it failed */
		assert(l <= rv || oc->boc->state == BOS_FAILED);
		state = oc->boc->state;
		/* report our position for transit-buffer flow control */
		objSignalFetchLocked(oc, l);
		if (rv > l || state >= BOS_FINISHED)
			break;
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
445 |
|
|
446 |
|
// get a new extension _or_ register a notification |
447 |
|
/*
 * Non-blocking counterpart to ObjWaitExtend(): return the current
 * body length; if no progress past l is available and the boc is not
 * in a final state, register qe for a one-shot obj_boc_notify()
 * callback instead of waiting.  The observed boc state is reported
 * via *statep (if non-NULL).
 */
uint64_t
ObjVAIGetExtend(struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep, struct vai_qe *qe)
{
	enum boc_state_e state;
	uint64_t rv;

	(void) wrk;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	rv = oc->boc->fetched_so_far;
	assert(l <= rv || oc->boc->state == BOS_FAILED);
	state = oc->boc->state;
	objSignalFetchLocked(oc, l);
	/* only enqueue once: a second insert would corrupt the list */
	if (l == rv && state < BOS_FINISHED &&
	    (qe->flags & VAI_QF_INQUEUE) == 0) {
		qe->flags |= VAI_QF_INQUEUE;
		VSLIST_INSERT_HEAD(&oc->boc->vai_q_head, qe, list);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
473 |
|
|
474 |
|
/*
 * Withdraw a pending notification registration, if any.  Safe to call
 * whether or not qe is currently queued; qe->flags is cleared either
 * way.
 */
void
ObjVAICancel(struct worker *wrk, struct boc *boc, struct vai_qe *qe)
{

	(void) wrk;
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);

	Lck_Lock(&boc->mtx);
	// inefficient, but should be rare
	if ((qe->flags & VAI_QF_INQUEUE) != 0)
		VSLIST_REMOVE(&boc->vai_q_head, qe, vai_qe, list);
	qe->flags = 0;
	Lck_Unlock(&boc->mtx);
}
489 |
|
|
490 |
|
/*==================================================================== |
491 |
|
*/ |
492 |
|
|
493 |
|
/*
 * Advance the busy object to the next state (states only move
 * forward) and notify all waiters.  The stevedore gets a chance to
 * react first via its optional objsetstate hook.
 */
void
ObjSetState(struct worker *wrk, const struct objcore *oc,
    enum boc_state_e next)
{
	const struct obj_methods *om;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	assert(next > oc->boc->state);

	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	/* BOS_STREAM is only reachable from BOS_PREP_STREAM, and the
	 * object cannot finish without its length attribute set */
	assert(next != BOS_STREAM || oc->boc->state == BOS_PREP_STREAM);
	assert(next != BOS_FINISHED || (oc->oa_present & (1 << OA_LEN)));

	if (oc->stobj->stevedore != NULL) {
		om = oc->stobj->stevedore->methods;
		if (om->objsetstate != NULL)
			om->objsetstate(wrk, oc, next);
	}

	Lck_Lock(&oc->boc->mtx);
	oc->boc->state = next;
	obj_boc_notify(oc->boc);
	Lck_Unlock(&oc->boc->mtx);
}
518 |
|
|
519 |
|
/*==================================================================== |
520 |
|
*/ |
521 |
|
|
522 |
|
void |
523 |
2614 |
ObjWaitState(const struct objcore *oc, enum boc_state_e want) |
524 |
|
{ |
525 |
|
|
526 |
2614 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
527 |
2614 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
528 |
|
|
529 |
2614 |
Lck_Lock(&oc->boc->mtx); |
530 |
|
/* wake up obj_extend_condwait() */ |
531 |
2614 |
if (oc->flags & OC_F_CANCEL) |
532 |
353 |
PTOK(pthread_cond_signal(&oc->boc->cond)); |
533 |
31562 |
while (1) { |
534 |
31562 |
if (oc->boc->state >= want) |
535 |
2614 |
break; |
536 |
28948 |
(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx); |
537 |
|
} |
538 |
2614 |
Lck_Unlock(&oc->boc->mtx); |
539 |
2614 |
} |
540 |
|
|
541 |
|
/*==================================================================== |
542 |
|
* ObjGetlen() |
543 |
|
* |
544 |
|
* This is a separate function because it may need locking |
545 |
|
*/ |
546 |
|
|
547 |
|
uint64_t |
548 |
5370 |
ObjGetLen(struct worker *wrk, struct objcore *oc) |
549 |
|
{ |
550 |
|
uint64_t len; |
551 |
|
|
552 |
5370 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
553 |
|
|
554 |
5370 |
AZ(ObjGetU64(wrk, oc, OA_LEN, &len)); |
555 |
5370 |
return (len); |
556 |
|
} |
557 |
|
|
558 |
|
/*==================================================================== |
559 |
|
* ObjSlim() |
560 |
|
* |
561 |
|
* Free the whatever storage can be freed, without freeing the actual |
562 |
|
* object yet. |
563 |
|
*/ |
564 |
|
|
565 |
|
void |
566 |
1407 |
ObjSlim(struct worker *wrk, struct objcore *oc) |
567 |
|
{ |
568 |
1407 |
const struct obj_methods *om = obj_getmethods(oc); |
569 |
|
|
570 |
1407 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
571 |
|
|
572 |
1407 |
if (om->objslim != NULL) |
573 |
1407 |
om->objslim(wrk, oc); |
574 |
1407 |
} |
575 |
|
|
576 |
|
/*==================================================================== |
577 |
|
* Called when the boc used to populate the objcore is going away. |
578 |
|
* Useful for releasing any leftovers from Trim. |
579 |
|
*/ |
580 |
|
|
581 |
|
/*
 * Detach and destroy the boc when population of the objcore is done.
 * The stevedore's objbocdone hook (if any) runs first so it can
 * release leftovers from Trim; *boc is then freed and cleared.
 */
void
ObjBocDone(struct worker *wrk, struct objcore *oc, struct boc **boc)
{
	const struct obj_methods *m;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(boc);
	CHECK_OBJ_NOTNULL(*boc, BOC_MAGIC);
	/* the stevedore may already be gone (lifetime phase 5) */
	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	if (oc->stobj->stevedore != NULL) {
		m = obj_getmethods(oc);
		if (m->objbocdone != NULL)
			m->objbocdone(wrk, oc, *boc);
	}
	obj_deleteboc(boc);
}
598 |
|
|
599 |
|
/*==================================================================== |
600 |
|
*/ |
601 |
|
void |
602 |
1796 |
ObjFreeObj(struct worker *wrk, struct objcore *oc) |
603 |
|
{ |
604 |
1796 |
const struct obj_methods *m = obj_getmethods(oc); |
605 |
|
|
606 |
1796 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
607 |
|
|
608 |
1796 |
AN(m->objfree); |
609 |
1796 |
m->objfree(wrk, oc); |
610 |
1796 |
AZ(oc->stobj->stevedore); |
611 |
1796 |
} |
612 |
|
|
613 |
|
/*==================================================================== |
614 |
|
* ObjHasAttr() |
615 |
|
* |
616 |
|
* Check if object has this attribute |
617 |
|
*/ |
618 |
|
|
619 |
|
int |
620 |
7813 |
ObjHasAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr) |
621 |
|
{ |
622 |
|
|
623 |
7813 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
624 |
7813 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
625 |
|
|
626 |
7813 |
if (oc->oa_present) |
627 |
7792 |
return (oc->oa_present & (1 << attr)); |
628 |
|
|
629 |
|
/* resurrected persistent objects don't have oa_present set */ |
630 |
21 |
return (ObjGetAttr(wrk, oc, attr, NULL) != NULL ? 1 : 0); |
631 |
7813 |
} |
632 |
|
|
633 |
|
/*==================================================================== |
634 |
|
* ObjGetAttr() |
635 |
|
* |
636 |
|
* Get an attribute of the object. |
637 |
|
* |
638 |
|
* Returns NULL on unset or zero length attributes and len set to |
639 |
|
* zero. Returns Non-NULL otherwise and len is updated with the attributes |
640 |
|
* length. |
641 |
|
*/ |
642 |
|
|
643 |
|
const void * |
644 |
22316 |
ObjGetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
645 |
|
ssize_t *len) |
646 |
|
{ |
647 |
22316 |
const struct obj_methods *om = obj_getmethods(oc); |
648 |
|
|
649 |
22316 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
650 |
|
|
651 |
22316 |
AN(om->objgetattr); |
652 |
22316 |
return (om->objgetattr(wrk, oc, attr, len)); |
653 |
|
} |
654 |
|
|
655 |
|
/*==================================================================== |
656 |
|
* ObjSetAttr() |
657 |
|
* |
658 |
|
* Setting fixed size attributes always succeeds. |
659 |
|
* |
660 |
|
* Setting a variable size attribute asserts if the combined size of the |
661 |
|
* variable attributes exceeds the total variable attribute space set at |
662 |
|
* object creation. If there is space it always succeeds. |
663 |
|
* |
664 |
|
* Setting an auxiliary attribute can fail. |
665 |
|
* |
666 |
|
* Resetting any variable asserts if the new length does not match the |
667 |
|
* previous length exactly. |
668 |
|
* |
669 |
|
* If ptr is Non-NULL, it points to the new content which is copied into |
670 |
|
* the attribute. Otherwise the caller will have to do the copying. |
671 |
|
* |
672 |
|
* Return value is non-NULL on success and NULL on failure. If ptr was |
673 |
|
* non-NULL, it is an error to use the returned pointer to set the |
674 |
|
* attribute data, it is only a success indicator in that case. |
675 |
|
*/ |
676 |
|
|
677 |
|
void * |
678 |
10579 |
ObjSetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
679 |
|
ssize_t len, const void *ptr) |
680 |
|
{ |
681 |
10579 |
const struct obj_methods *om = obj_getmethods(oc); |
682 |
|
void *r; |
683 |
|
|
684 |
10579 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
685 |
10579 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
686 |
|
|
687 |
10579 |
AN(om->objsetattr); |
688 |
10579 |
assert((int)attr < 16); |
689 |
10579 |
r = om->objsetattr(wrk, oc, attr, len, ptr); |
690 |
10579 |
if (r) |
691 |
10577 |
oc->oa_present |= (1 << attr); |
692 |
10579 |
return (r); |
693 |
|
} |
694 |
|
|
695 |
|
/*==================================================================== |
696 |
|
* ObjTouch() |
697 |
|
*/ |
698 |
|
|
699 |
|
void |
700 |
3259 |
ObjTouch(struct worker *wrk, struct objcore *oc, vtim_real now) |
701 |
|
{ |
702 |
3259 |
const struct obj_methods *om = obj_getmethods(oc); |
703 |
|
|
704 |
3259 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
705 |
3259 |
if (om->objtouch != NULL) |
706 |
3258 |
om->objtouch(wrk, oc, now); |
707 |
3259 |
} |
708 |
|
|
709 |
|
/*==================================================================== |
710 |
|
* Utility functions which work on top of the previous ones |
711 |
|
*/ |
712 |
|
|
713 |
|
int |
714 |
49 |
ObjCopyAttr(struct worker *wrk, struct objcore *oc, struct objcore *ocs, |
715 |
|
enum obj_attr attr) |
716 |
|
{ |
717 |
|
const void *vps; |
718 |
|
void *vpd; |
719 |
|
ssize_t l; |
720 |
|
|
721 |
49 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
722 |
49 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
723 |
49 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
724 |
49 |
CHECK_OBJ_NOTNULL(ocs, OBJCORE_MAGIC); |
725 |
|
|
726 |
49 |
vps = ObjGetAttr(wrk, ocs, attr, &l); |
727 |
|
// XXX: later we want to have zero-length OA's too |
728 |
49 |
if (vps == NULL || l <= 0) |
729 |
0 |
return (-1); |
730 |
49 |
vpd = ObjSetAttr(wrk, oc, attr, l, vps); |
731 |
49 |
if (vpd == NULL) |
732 |
0 |
return (-1); |
733 |
49 |
return (0); |
734 |
49 |
} |
735 |
|
|
736 |
|
int |
737 |
2218 |
ObjSetXID(struct worker *wrk, struct objcore *oc, vxid_t xid) |
738 |
|
{ |
739 |
|
uint64_t u; |
740 |
|
|
741 |
2218 |
u = VXID(xid); |
742 |
2218 |
AZ(ObjSetU64(wrk, oc, OA_VXID, u)); |
743 |
2218 |
return (0); |
744 |
|
} |
745 |
|
|
746 |
|
|
747 |
|
vxid_t |
748 |
2898 |
ObjGetXID(struct worker *wrk, struct objcore *oc) |
749 |
|
{ |
750 |
|
vxid_t u; |
751 |
|
|
752 |
2898 |
AZ(ObjGetU64(wrk, oc, OA_VXID, &u.vxid)); |
753 |
2898 |
return (u); |
754 |
|
} |
755 |
|
|
756 |
|
/*-------------------------------------------------------------------- |
757 |
|
* There is no well-defined byteorder for IEEE-754 double and the |
758 |
|
* correct solution (frexp(3) and manual encoding) is more work |
759 |
|
* than our (weak) goal of being endian-agnostic requires at this point. |
760 |
|
* We give it a shot by memcpy'ing doubles over a uint64_t and then |
761 |
|
* BE encode that. |
762 |
|
*/ |
763 |
|
|
764 |
|
int |
765 |
2218 |
ObjSetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double t) |
766 |
|
{ |
767 |
|
void *vp; |
768 |
|
uint64_t u; |
769 |
|
|
770 |
2218 |
assert(sizeof t == sizeof u); |
771 |
2218 |
memcpy(&u, &t, sizeof u); |
772 |
2218 |
vp = ObjSetAttr(wrk, oc, a, sizeof u, NULL); |
773 |
2218 |
if (vp == NULL) |
774 |
0 |
return (-1); |
775 |
2218 |
vbe64enc(vp, u); |
776 |
2218 |
return (0); |
777 |
2218 |
} |
778 |
|
|
779 |
|
int |
780 |
2 |
ObjGetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double *d) |
781 |
|
{ |
782 |
|
const void *vp; |
783 |
|
uint64_t u; |
784 |
|
ssize_t l; |
785 |
|
|
786 |
2 |
assert(sizeof *d == sizeof u); |
787 |
2 |
vp = ObjGetAttr(wrk, oc, a, &l); |
788 |
2 |
if (vp == NULL) |
789 |
0 |
return (-1); |
790 |
2 |
if (d != NULL) { |
791 |
2 |
assert(l == sizeof u); |
792 |
2 |
u = vbe64dec(vp); |
793 |
2 |
memcpy(d, &u, sizeof *d); |
794 |
2 |
} |
795 |
2 |
return (0); |
796 |
2 |
} |
797 |
|
|
798 |
|
/*-------------------------------------------------------------------- |
799 |
|
*/ |
800 |
|
|
801 |
|
int |
802 |
4942 |
ObjSetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t t) |
803 |
|
{ |
804 |
|
void *vp; |
805 |
|
|
806 |
4942 |
vp = ObjSetAttr(wrk, oc, a, sizeof t, NULL); |
807 |
4942 |
if (vp == NULL) |
808 |
0 |
return (-1); |
809 |
4942 |
vbe64enc(vp, t); |
810 |
4942 |
return (0); |
811 |
4942 |
} |
812 |
|
|
813 |
|
int |
814 |
8269 |
ObjGetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t *d) |
815 |
|
{ |
816 |
|
const void *vp; |
817 |
|
ssize_t l; |
818 |
|
|
819 |
8269 |
vp = ObjGetAttr(wrk, oc, a, &l); |
820 |
8269 |
if (vp == NULL || l != sizeof *d) |
821 |
0 |
return (-1); |
822 |
8269 |
if (d != NULL) |
823 |
8269 |
*d = vbe64dec(vp); |
824 |
8269 |
return (0); |
825 |
8269 |
} |
826 |
|
|
827 |
|
/*-------------------------------------------------------------------- |
828 |
|
*/ |
829 |
|
|
830 |
|
int |
831 |
7285 |
ObjCheckFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of) |
832 |
|
{ |
833 |
|
const uint8_t *fp; |
834 |
|
|
835 |
7285 |
fp = ObjGetAttr(wrk, oc, OA_FLAGS, NULL); |
836 |
7285 |
AN(fp); |
837 |
7285 |
return ((*fp) & of); |
838 |
|
} |
839 |
|
|
840 |
|
void |
841 |
491 |
ObjSetFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of, int val) |
842 |
|
{ |
843 |
|
uint8_t *fp; |
844 |
|
|
845 |
491 |
fp = ObjSetAttr(wrk, oc, OA_FLAGS, 1, NULL); |
846 |
491 |
AN(fp); |
847 |
491 |
if (val) |
848 |
489 |
(*fp) |= of; |
849 |
|
else |
850 |
2 |
(*fp) &= ~of; |
851 |
491 |
} |
852 |
|
|
853 |
|
/*==================================================================== |
854 |
|
* Object event subscription mechanism. |
855 |
|
* |
856 |
|
* XXX: it is extremely unclear what the locking circumstances are here. |
857 |
|
*/ |
858 |
|
|
859 |
|
/*
 * One event subscription: events matching mask are routed to
 * func(priv, ...).
 */
struct oev_entry {
	unsigned			magic;
#define OEV_MAGIC			0xb0b7c5a1
	unsigned			mask;
	obj_event_f			*func;
	void				*priv;
	VTAILQ_ENTRY(oev_entry)		list;
};

/* All subscriptions, the rwlock protecting the list, and the OR of
 * all subscribed masks (read optimistically in ObjSendEvent()) */
static VTAILQ_HEAD(,oev_entry)		oev_list;
static pthread_rwlock_t			oev_rwl;
static unsigned				oev_mask;
871 |
|
|
872 |
|
/* |
873 |
|
* NB: ObjSubscribeEvents() is not atomic: |
874 |
|
* oev_mask is checked optimistically in ObjSendEvent() |
875 |
|
*/ |
876 |
|
uintptr_t |
877 |
39 |
ObjSubscribeEvents(obj_event_f *func, void *priv, unsigned mask) |
878 |
|
{ |
879 |
|
struct oev_entry *oev; |
880 |
|
|
881 |
39 |
AN(func); |
882 |
39 |
AZ(mask & ~OEV_MASK); |
883 |
|
|
884 |
39 |
ALLOC_OBJ(oev, OEV_MAGIC); |
885 |
39 |
AN(oev); |
886 |
39 |
oev->func = func; |
887 |
39 |
oev->priv = priv; |
888 |
39 |
oev->mask = mask; |
889 |
39 |
PTOK(pthread_rwlock_wrlock(&oev_rwl)); |
890 |
39 |
VTAILQ_INSERT_TAIL(&oev_list, oev, list); |
891 |
39 |
oev_mask |= mask; |
892 |
39 |
PTOK(pthread_rwlock_unlock(&oev_rwl)); |
893 |
39 |
return ((uintptr_t)oev); |
894 |
|
} |
895 |
|
|
896 |
|
/*
 * Remove the subscription identified by *handle (as returned by
 * ObjSubscribeEvents()) and rebuild oev_mask from the remaining
 * subscriptions.  Asserts if the handle is unknown; *handle is
 * cleared on return.
 */
void
ObjUnsubscribeEvents(uintptr_t *handle)
{
	struct oev_entry *oev, *oev2 = NULL;
	unsigned newmask = 0;

	AN(handle);
	AN(*handle);
	PTOK(pthread_rwlock_wrlock(&oev_rwl));
	VTAILQ_FOREACH(oev, &oev_list, list) {
		CHECK_OBJ_NOTNULL(oev, OEV_MAGIC);
		if ((uintptr_t)oev == *handle)
			oev2 = oev;
		else
			/* accumulate the masks of the survivors */
			newmask |= oev->mask;
	}
	AN(oev2);
	VTAILQ_REMOVE(&oev_list, oev2, list);
	oev_mask = newmask;
	AZ(newmask & ~OEV_MASK);
	PTOK(pthread_rwlock_unlock(&oev_rwl));
	FREE_OBJ(oev2);
	*handle = 0;
}
920 |
|
|
921 |
|
/*
 * Deliver an object event to every subscriber whose mask matches.
 * oev_mask is checked before taking the lock (see the NB above
 * ObjSubscribeEvents()), so a concurrent (un)subscribe may cause a
 * harmless extra or skipped lock round-trip.
 */
void
ObjSendEvent(struct worker *wrk, struct objcore *oc, unsigned event)
{
	struct oev_entry *oev;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	/* exactly one valid event bit must be set */
	AN(event & OEV_MASK);
	AZ(event & ~OEV_MASK);
	if (!(event & oev_mask))
		return;

	PTOK(pthread_rwlock_rdlock(&oev_rwl));
	VTAILQ_FOREACH(oev, &oev_list, list) {
		CHECK_OBJ_NOTNULL(oev, OEV_MAGIC);
		if (event & oev->mask)
			oev->func(wrk, oev->priv, oc, event);
	}
	PTOK(pthread_rwlock_unlock(&oev_rwl));

}
942 |
|
|
943 |
|
/*
 * One-time initialization of the object event subscription machinery.
 */
void
ObjInit(void)
{
	VTAILQ_INIT(&oev_list);
	PTOK(pthread_rwlock_init(&oev_rwl, NULL));
}