varnish-cache/bin/varnishd/cache/cache_ws_emu.c

/*-
 * Copyright (c) 2021 Varnish Software AS
 * All rights reserved.
 *
 * Author: Dridi Boukelmoune <dridi.boukelmoune@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#ifdef ENABLE_WORKSPACE_EMULATOR

#if HAVE_SANITIZER_ASAN_INTERFACE_H
#  include <sanitizer/asan_interface.h>
#endif

#include "cache_varnishd.h"

#include <stdlib.h>
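
/*
 * Bookkeeping for the workspace emulator: every workspace allocation is
 * backed by its own heap allocation (struct ws_alloc), and the workspace
 * storage itself only carries a struct ws_emu holding the list of live
 * allocations and the emulated length.
 */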
struct ws_alloc {
	unsigned		magic;
#define WS_ALLOC_MAGIC		0x22e7fd05
	unsigned		off;
	unsigned		len;
	char			*ptr;
	VTAILQ_ENTRY(ws_alloc)	list;
};

VTAILQ_HEAD(ws_alloc_head, ws_alloc);

struct ws_emu {
	unsigned		magic;
#define WS_EMU_MAGIC		0x1c89b6ab
	unsigned		len;
	struct ws		*ws;
	struct ws_alloc_head	head;
};
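
/*
 * Sentinel returned by WS_Snapshot() for an overflowed workspace; its own
 * address cannot collide with any pointer handed out by the emulator.
 */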
static const uintptr_t snap_overflowed = (uintptr_t)&snap_overflowed;
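
/* Retrieve the emulator state stored at the start of the workspace. */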
static struct ws_emu *
ws_emu(const struct ws *ws)
{
	struct ws_emu *we;

	CAST_OBJ_NOTNULL(we, (void *)ws->s, WS_EMU_MAGIC);
	return (we);
}
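
/*
 * Audit the workspace: offsets and lengths in the allocation list must be
 * consistent with the emulated length, and a trailing reservation must
 * match the {f, r} pointers.
 */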
void
WS_Assert(const struct ws *ws)
{
	struct ws_emu *we;
	struct ws_alloc *wa, *wa2 = NULL;
	size_t len;

	CHECK_OBJ_NOTNULL(ws, WS_MAGIC);
	assert(ws->s != NULL);
	assert(PAOK(ws->s));
	assert(ws->e != NULL);
	assert(PAOK(ws->e));

	we = ws_emu(ws);
	len = pdiff(ws->s, ws->e);
	assert(len == we->len);

	len = 0;
	VTAILQ_FOREACH(wa, &we->head, list) {
		CHECK_OBJ_NOTNULL(wa, WS_ALLOC_MAGIC);
		wa2 = wa;
		assert(len == wa->off);
		if (wa->ptr == ws->f || wa->ptr == NULL) /* reservation */
			break;
		AN(wa->len);
		len += PRNDUP(wa->len);
		assert(len <= we->len);
	}

	if (wa != NULL) {
		AZ(VTAILQ_NEXT(wa, list));
		if (wa->ptr == NULL) {
			AZ(wa->len);
			assert(ws->f == ws->e);
			assert(ws->r == ws->e);
		} else {
			AN(wa->len);
			assert(ws->f == wa->ptr);
			assert(ws->r == ws->f + wa->len);
		}
		len += PRNDUP(wa->len);
		assert(len <= we->len);
	} else {
		AZ(ws->f);
		AZ(ws->r);
	}

	DSLb(DBG_WORKSPACE, "WS(%p) = (%s, %p %zu %zu %zu)",
	    ws, ws->id, ws->s, wa2 == NULL ? 0 : wa2->off + PRNDUP(wa2->len),
	    ws->r == NULL ? 0 : pdiff(ws->f, ws->r),
	    pdiff(ws->s, ws->e));
}
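
/*
 * Report whether ptr (with len bytes, or as a NUL-terminated string when
 * len is negative) was allocated from this workspace, either inside the
 * current reservation or inside one of the recorded allocations.
 */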
int
WS_Allocated(const struct ws *ws, const void *ptr, ssize_t len)
{
	struct ws_emu *we;
	struct ws_alloc *wa;
	uintptr_t p, pa;

	WS_Assert(ws);
	AN(ptr);
	if (len < 0)
		len = strlen(ptr) + 1;
	p = (uintptr_t)ptr;
	we = ws_emu(ws);

	VTAILQ_FOREACH(wa, &we->head, list) {
		pa = (uintptr_t)wa->ptr;
		if (p >= (uintptr_t)ws->f && p <= (uintptr_t)ws->r)
			return (1);
		/* XXX: clang 12's ubsan triggers a pointer overflow on
		 * the if statement below. Since the purpose is to check
		 * that a pointer+length is within bounds of another
		 * pointer+length it's unclear whether a pointer overflow
		 * is relevant. Worked around for now with uintptr_t.
		 */
		if (p >= pa && p + len <= pa + wa->len)
			return (1);
	}
	return (0);
}
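
/*
 * Set up a workspace over the given storage. The storage only holds the
 * struct ws_emu header; actual allocations live on the heap.
 */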
void
WS_Init(struct ws *ws, const char *id, void *space, unsigned len)
{
	struct ws_emu *we;

	DSLb(DBG_WORKSPACE,
	    "WS_Init(%p, \"%s\", %p, %u)", ws, id, space, len);
	assert(space != NULL);
	assert(PAOK(space));
	assert(len >= sizeof *we);

	len = PRNDDN(len - 1);
	INIT_OBJ(ws, WS_MAGIC);
	ws->s = space;
	ws->e = ws->s + len;

	assert(id[0] & 0x20);	// cheesy islower()
	bstrcpy(ws->id, id);

	we = space;
	INIT_OBJ(we, WS_EMU_MAGIC);
	VTAILQ_INIT(&we->head);
	we->len = len;

	WS_Assert(ws);
}
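
/* Remove the last allocation from the list and free its backing memory. */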
static void
ws_alloc_free(struct ws_emu *we, struct ws_alloc **wap)
{
	struct ws_alloc *wa;

	TAKE_OBJ_NOTNULL(wa, wap, WS_ALLOC_MAGIC);
	AZ(VTAILQ_NEXT(wa, list));
	VTAILQ_REMOVE(&we->head, wa, list);
	free(wa->ptr);
	FREE_OBJ(wa);
}
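
/*
 * Roll the workspace back to a cookie from WS_Snapshot(), freeing every
 * allocation made after the snapshot was taken.
 */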
void
WS_Reset(struct ws *ws, uintptr_t pp)
{
	struct ws_emu *we;
	struct ws_alloc *wa;
	char *p;

	WS_Assert(ws);
	AN(pp);
	if (pp == snap_overflowed) {
		DSLb(DBG_WORKSPACE, "WS_Reset(%p, overflowed)", ws);
		AN(WS_Overflowed(ws));
		return;
	}
	p = (char *)pp;
	DSLb(DBG_WORKSPACE, "WS_Reset(%p, %p)", ws, p);
	AZ(ws->r);

	we = ws_emu(ws);
	while ((wa = VTAILQ_LAST(&we->head, ws_alloc_head)) != NULL &&
	    wa->ptr != p)
		ws_alloc_free(we, &wa);
	if (wa == NULL)
		assert(p == ws->s);

	WS_Assert(ws);
}
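
/*
 * Copy the bytes [b, e) into a fresh reservation, optionally rolling the
 * workspace back first; returns the number of bytes pipelined, or -1 if
 * they do not fit.
 */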
int
WS_Pipeline(struct ws *ws, const void *b, const void *e, unsigned rollback)
{
	void *tmp;
	unsigned r, l;

	WS_Assert(ws);
	AZ(ws->f);
	AZ(ws->r);

	/* NB: the pipeline cannot be moved if it comes from the same
	 * workspace because a rollback would free the memory. This is
	 * emulated with two copies instead.
	 */

	if (b != NULL) {
		AN(e);
		l = pdiff(b, e);
		tmp = malloc(l);
		AN(tmp);
		memcpy(tmp, b, l);
	} else {
		AZ(e);
		l = 0;
		tmp = NULL;
	}

	if (rollback)
		WS_Rollback(ws, 0);

	r = WS_ReserveAll(ws);

	if (l > r) {
		free(tmp);
		return (-1);
	}

	if (l > 0)
		memcpy(ws->f, tmp, l);
	free(tmp);
	return (l);
}
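
/*
 * Common allocation path: compute the next offset, mark the workspace
 * overflowed when the emulated length is exhausted, and back the new
 * allocation with malloc(). A len of 0 requests all remaining space,
 * which is how reservations are made.
 */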
static struct ws_alloc *
ws_emu_alloc(struct ws *ws, unsigned len)
{
	struct ws_emu *we;
	struct ws_alloc *wa;
	size_t off = 0;

	WS_Assert(ws);
	AZ(ws->r);

	we = ws_emu(ws);
	wa = VTAILQ_LAST(&we->head, ws_alloc_head);
	CHECK_OBJ_ORNULL(wa, WS_ALLOC_MAGIC);

	if (wa != NULL)
		off = wa->off + PRNDUP(wa->len);
	if (off + len > we->len) {
		WS_MarkOverflow(ws);
		return (NULL);
	}
	if (len == 0)
		len = we->len - off;

	ALLOC_OBJ(wa, WS_ALLOC_MAGIC);
	AN(wa);
	wa->off = off;
	wa->len = len;
	if (len > 0) {
		wa->ptr = malloc(len);
		AN(wa->ptr);
	}
	VTAILQ_INSERT_TAIL(&we->head, wa, list);
	return (wa);
}
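
/* Allocate bytes from the workspace, NULL when it would overflow. */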
void *
WS_Alloc(struct ws *ws, unsigned bytes)
{
	struct ws_alloc *wa;

	assert(bytes > 0);
	wa = ws_emu_alloc(ws, bytes);
	WS_Assert(ws);
	if (wa != NULL) {
		AN(wa->ptr);
		DSLb(DBG_WORKSPACE, "WS_Alloc(%p, %u) = %p",
		    ws, bytes, wa->ptr);
		return (wa->ptr);
	}
	return (NULL);
}
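
/* Allocate and copy; len == -1 copies str as a NUL-terminated string. */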
void *
WS_Copy(struct ws *ws, const void *str, int len)
{
	struct ws_alloc *wa;

	AN(str);
	if (len == -1)
		len = strlen(str) + 1;
	assert(len > 0);
	wa = ws_emu_alloc(ws, len);
	WS_Assert(ws);
	if (wa != NULL) {
		AN(wa->ptr);
		memcpy(wa->ptr, str, len);
		DSLb(DBG_WORKSPACE, "WS_Copy(%p, %d) = %p",
		    ws, len, wa->ptr);
		return (wa->ptr);
	}
	return (NULL);
}
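
/*
 * Return a cookie for the current top of the workspace, to be handed back
 * to WS_Reset(), e.g. (sketch):
 *
 *	sn = WS_Snapshot(ws);
 *	... temporary allocations ...
 *	WS_Reset(ws, sn);
 *
 * An overflowed workspace yields the snap_overflowed sentinel instead.
 */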
uintptr_t
WS_Snapshot(struct ws *ws)
{
	struct ws_emu *we;
	struct ws_alloc *wa;
	void *p;

	WS_Assert(ws);
	assert(ws->r == NULL);
	if (WS_Overflowed(ws)) {
		DSLb(DBG_WORKSPACE, "WS_Snapshot(%p) = overflowed", ws);
		return (snap_overflowed);
	}

	we = ws_emu(ws);
	wa = VTAILQ_LAST(&we->head, ws_alloc_head);
	CHECK_OBJ_ORNULL(wa, WS_ALLOC_MAGIC);
	p = (wa == NULL ? ws->s : wa->ptr);
	DSLb(DBG_WORKSPACE, "WS_Snapshot(%p) = %p", ws, p);
	return ((uintptr_t)p);
}
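
/*
 * Reserve all remaining free space; ws->f and ws->r delimit the
 * reservation until it is ended with WS_Release() or WS_ReleaseP().
 */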
unsigned
WS_ReserveAll(struct ws *ws)
{
	struct ws_alloc *wa;
	unsigned b;

	wa = ws_emu_alloc(ws, 0);
	AN(wa);

	if (wa->ptr != NULL) {
		AN(wa->len);
		ws->f = wa->ptr;
		ws->r = ws->f + wa->len;
	} else {
		ws->f = ws->r = ws->e;
	}

	b = pdiff(ws->f, ws->r);
	DSLb(DBG_WORKSPACE, "WS_ReserveAll(%p) = %u", ws, b);
	WS_Assert(ws);
	return (b);
}
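
/* Reserve exactly "bytes" bytes, or return 0 if the workspace would overflow. */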
unsigned
WS_ReserveSize(struct ws *ws, unsigned bytes)
{
	struct ws_emu *we;
	struct ws_alloc *wa;

	assert(bytes > 0);
	wa = ws_emu_alloc(ws, bytes);
	if (wa == NULL)
		return (0);

	AN(wa->ptr);
	assert(wa->len == bytes);
	ws->f = wa->ptr;
	ws->r = ws->f + bytes;
	we = ws_emu(ws);
	DSLb(DBG_WORKSPACE, "WS_ReserveSize(%p, %u/%u) = %u",
	    ws, bytes, we->len - wa->off, bytes);
	WS_Assert(ws);
	return (bytes);
}
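
/*
 * End the current reservation, keeping only its first "bytes" bytes; a
 * zero-byte release drops the reservation entirely. The discarded tail is
 * poisoned when ASAN is available.
 */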
static void
ws_release(struct ws *ws, unsigned bytes)
{
	struct ws_emu *we;
	struct ws_alloc *wa;

	WS_Assert(ws);
	AN(ws->f);
	AN(ws->r);
	we = ws_emu(ws);
	wa = VTAILQ_LAST(&we->head, ws_alloc_head);
	AN(wa);
	assert(bytes <= wa->len);
	ws->f = ws->r = NULL;

	if (bytes == 0) {
		ws_alloc_free(we, &wa);
		return;
	}

	AN(wa->ptr);
#ifdef ASAN_POISON_MEMORY_REGION
	ASAN_POISON_MEMORY_REGION(wa->ptr + bytes, wa->len - bytes);
#endif
	wa->len = bytes;
	WS_Assert(ws);
}

void
WS_Release(struct ws *ws, unsigned bytes)
{

	ws_release(ws, bytes);
	DSLb(DBG_WORKSPACE, "WS_Release(%p, %u)", ws, bytes);
}

void
WS_ReleaseP(struct ws *ws, const char *ptr)
{
	unsigned l;

	WS_Assert(ws);
	assert(ws->r != NULL);
	assert(ptr >= ws->f);
	assert(ptr <= ws->r);
	l = pdiff(ws->f, ptr);
	ws_release(ws, l);
	DSLb(DBG_WORKSPACE, "WS_ReleaseP(%p, %p (%u))", ws, ptr, l);
}
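
/*
 * Translate a workspace offset (such as one returned by
 * WS_ReservationOffset()) back into a pointer; the offset must match an
 * existing allocation.
 */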
void *
WS_AtOffset(const struct ws *ws, unsigned off, unsigned len)
{
	struct ws_emu *we;
	struct ws_alloc *wa;

	WS_Assert(ws);
	we = ws_emu(ws);

	VTAILQ_FOREACH(wa, &we->head, list) {
		if (wa->off == off) {
			assert(wa->len >= len);
			return (wa->ptr);
		}
	}

	WRONG("invalid offset");
	NEEDLESS(return (NULL));
}

unsigned
WS_ReservationOffset(const struct ws *ws)
{
	struct ws_emu *we;
	struct ws_alloc *wa;

	WS_Assert(ws);
	AN(ws->f);
	AN(ws->r);
	we = ws_emu(ws);
	wa = VTAILQ_LAST(&we->head, ws_alloc_head);
	AN(wa);
	return (wa->off);
}
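
/*
 * Copy a dump of the workspace into buf; where selects the starting point
 * ('s', 'f' or 'r'). Leftover buffer space is filled with 0xa5, and errno
 * is set whenever 0 is returned.
 */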
unsigned
WS_Dump(const struct ws *ws, char where, size_t off, void *buf, size_t len)
{
	struct ws_emu *we;
	struct ws_alloc *wa;
	unsigned l;
	char *b;

	WS_Assert(ws);
	AN(buf);
	AN(len);

	if (strchr("sfr", where) == NULL) {
		errno = EINVAL;
		return (0);
	}

	if (where == 'r' && ws->r == NULL) {
		errno = EAGAIN;
		return (0);
	}

	we = ws_emu(ws);
	wa = VTAILQ_LAST(&we->head, ws_alloc_head);

	l = we->len;
	if (where != 's' && wa != NULL) {
		l -= wa->off;
		if (where == 'f')
			l -= wa->len;
	}

	if (off > l) {
		errno = EFAULT;
		return (0);
	}

	b = buf;
	if (where == 'f' && ws->r != NULL) {
		if (l > len)
			l = len;
		AN(wa);
		memcpy(b, wa->ptr, l);
		b += l;
		len -= l;
	}

	if (where == 's') {
		VTAILQ_FOREACH(wa, &we->head, list) {
			if (len == 0)
				break;
			if (wa->ptr == NULL)
				break;
			l = vmin_t(size_t, wa->len, len);
			memcpy(b, wa->ptr, l);
			b += l;
			len -= l;
		}
	}

	if (len > 0)
		memset(b, 0xa5, len);
	return (l);
}
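
/* Panic-dump the emulator state: the ws_emu header and each allocation. */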
static void
ws_emu_panic(struct vsb *vsb, const struct ws *ws)
{
	const struct ws_emu *we;
	const struct ws_alloc *wa;

	we = (void *)ws->s;
	if (PAN_dump_once(vsb, we, WS_EMU_MAGIC, "ws_emu"))
		return;
	VSB_printf(vsb, "len = %u,\n", we->len);

	VTAILQ_FOREACH(wa, &we->head, list) {
		if (PAN_dump_once_oneline(vsb, wa, WS_ALLOC_MAGIC, "ws_alloc"))
			break;
		VSB_printf(vsb, "off, len, ptr} = {%u, %u, %p}\n",
		    wa->off, wa->len, wa->ptr);
	}

	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}
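
/* Panic-dump a workspace, followed by its emulator state. */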
void
WS_Panic(struct vsb *vsb, const struct ws *ws)
{

	if (PAN_dump_struct(vsb, ws, WS_MAGIC, "ws"))
		return;
	if (ws->id[0] != '\0' && (!(ws->id[0] & 0x20)))	// cheesy islower()
		VSB_cat(vsb, "OVERFLOWED ");
	VSB_printf(vsb, "id = \"%s\",\n", ws->id);
	VSB_printf(vsb, "{s, e} = {%p", ws->s);
	if (ws->e >= ws->s)
		VSB_printf(vsb, ", +%ld", (long) (ws->e - ws->s));
	else
		VSB_printf(vsb, ", %p", ws->e);
	VSB_cat(vsb, "},\n");
	VSB_printf(vsb, "{f, r} = {%p", ws->f);
	if (ws->r >= ws->f)
		VSB_printf(vsb, ", +%ld", (long) (ws->r - ws->f));
	else
		VSB_printf(vsb, ", %p", ws->r);
	VSB_cat(vsb, "},\n");

	ws_emu_panic(vsb, ws);

	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}

#endif /* ENABLE_WORKSPACE_EMULATOR */