/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifdef VRT_H_INCLUDED
# error "vrt.h included before cache.h - they are exclusive"
#endif

#ifdef CACHE_H_INCLUDED
# error "cache.h included multiple times"
#endif

#include <math.h>
#include <pthread.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/uio.h>

#include "vdef.h"
#include "vrt.h"

#define CACHE_H_INCLUDED  // After vrt.h include.

#include "miniobj.h"
#include "vas.h"
#include "vqueue.h"
#include "vtree.h"

#include "vapi/vsl_int.h"

/*--------------------------------------------------------------------*/

struct vxids {
        uint64_t vxid;
};

typedef struct vxids vxid_t;

#define NO_VXID ((struct vxids){0})
#define IS_NO_VXID(x) ((x).vxid == 0)
#define VXID_TAG(x) ((uintmax_t)((x).vxid & (VSL_CLIENTMARKER|VSL_BACKENDMARKER)))
#define VXID(u) ((uintmax_t)((u.vxid) & VSL_IDENTMASK))
#define IS_SAME_VXID(x, y) ((x).vxid == (y).vxid)
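
/*
 * Minimal usage sketch for the vxid helpers above (illustrative only;
 * assumes a struct vsl_log *vsl, whose wid member is a vxid_t, and the
 * SLT_Debug tag):
 *
 *        if (!IS_NO_VXID(vsl->wid))
 *                VSLb(vsl, SLT_Debug, "vxid %ju", VXID(vsl->wid));
 */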

/*--------------------------------------------------------------------*/

struct body_status {
        const char *name;
        int nbr;
        int avail;
        int length_known;
};

#define BODYSTATUS(U, l, n, a, k) extern const struct body_status BS_##U[1];
#include "tbl/body_status.h"

typedef const struct body_status *body_status_t;

/*--------------------------------------------------------------------*/

struct stream_close {
        unsigned magic;
#define STREAM_CLOSE_MAGIC 0xc879c93d
        int idx;
        unsigned is_err;
        const char *name;
        const char *desc;
};
extern const struct stream_close SC_NULL[1];
#define SESS_CLOSE(nm, stat, err, desc) \
        extern const struct stream_close SC_##nm[1];
#include "tbl/sess_close.h"


/*--------------------------------------------------------------------
 * Indices into http->hd[]
 */
enum {
#define SLTH(tag, ind, req, resp, sdesc, ldesc) ind,
#include "tbl/vsl_tags_http.h"
};

/*--------------------------------------------------------------------*/

struct ban;
struct ban_proto;
struct cli;
struct http_conn;
struct listen_sock;
struct mempool;
struct objcore;
struct objhead;
struct pool;
struct req_step;
struct sess;
struct transport;
struct vcf;
struct VSC_lck;
struct VSC_main;
struct VSC_main_wrk;
struct worker;
struct worker_priv;

#define DIGEST_LEN 32

/*--------------------------------------------------------------------*/

struct lock { void *priv; };    // Opaque

/*--------------------------------------------------------------------
 * Workspace structure for quick memory allocation.
 */

#define WS_ID_SIZE 4

struct ws {
        unsigned magic;
#define WS_MAGIC 0x35fac554
        char id[WS_ID_SIZE];    /* identity */
        char *s;                /* (S)tart of buffer */
        char *f;                /* (F)ree/front pointer */
        char *r;                /* (R)eserved length */
        char *e;                /* (E)nd of buffer */
};
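
/*
 * Rough sketch of how the pointers relate (roughly s <= f <= e, with r
 * only set while a reservation is active) and of typical use through the
 * WS_*() functions declared further down in this file:
 *
 *        uintptr_t snap = WS_Snapshot(ws);
 *        char *p = WS_Alloc(ws, 32);      // NULL when the workspace overflows
 *        if (p == NULL)
 *                // caller decides how to fail
 *        ...
 *        WS_Reset(ws, snap);              // roll back to the snapshot
 */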

/*--------------------------------------------------------------------
 *
 */

struct http {
        unsigned magic;
#define HTTP_MAGIC 0x6428b5c9

        uint16_t shd;           /* Size of hd space */
        txt *hd;
        unsigned char *hdf;
#define HDF_FILTER (1 << 0)     /* Filtered by Connection */

        /* NB: ->nhd and below zeroed/initialized by http_Teardown */
        uint16_t nhd;           /* Next free hd */

        enum VSL_tag_e logtag;  /* Must be SLT_*Method */
        struct vsl_log *vsl;

        struct ws *ws;
        uint16_t status;
        uint8_t protover;
};

/*--------------------------------------------------------------------*/

struct acct_req {
#define ACCT(foo) uint64_t foo;
#include "tbl/acct_fields_req.h"
};

/*--------------------------------------------------------------------*/

struct acct_bereq {
#define ACCT(foo) uint64_t foo;
#include "tbl/acct_fields_bereq.h"
};

/*--------------------------------------------------------------------*/

struct vsl_log {
        uint32_t *wlb, *wlp, *wle;
        unsigned wlr;
        vxid_t wid;
};

/*--------------------------------------------------------------------*/

VRBT_HEAD(vrt_privs, vrt_priv);

/* Worker pool stuff -------------------------------------------------*/

typedef void task_func_t(struct worker *wrk, void *priv);

struct pool_task {
        VTAILQ_ENTRY(pool_task) list;
        task_func_t *func;
        void *priv;
};

/*
 * tasks are taken off the queues in this order
 *
 * TASK_QUEUE_{REQ|STR} are new req's (H1/H2), and subject to queue limit.
 *
 * TASK_QUEUE_RUSH is req's returning from waiting list
 *
 * NOTE: When changing the number of classes, update places marked with
 * TASK_QUEUE_RESERVE in params.h
 */
enum task_prio {
        TASK_QUEUE_BO,
        TASK_QUEUE_RUSH,
        TASK_QUEUE_REQ,
        TASK_QUEUE_STR,
        TASK_QUEUE_VCA,
        TASK_QUEUE_BG,
        TASK_QUEUE__END
};

#define TASK_QUEUE_HIGHEST_PRIORITY TASK_QUEUE_BO
#define TASK_QUEUE_RESERVE TASK_QUEUE_BG
#define TASK_QUEUE_LIMITED(prio) \
        (prio == TASK_QUEUE_REQ || prio == TASK_QUEUE_STR)

/*--------------------------------------------------------------------*/

struct worker {
        unsigned magic;
#define WORKER_MAGIC 0x6391adcf
        int strangelove;
        struct worker_priv *wpriv;
        struct pool *pool;
        struct VSC_main_wrk *stats;
        struct vsl_log *vsl;            // borrowed from req/bo

        struct pool_task task[1];

        vtim_real lastused;

        pthread_cond_t cond;

        struct ws aws[1];

        unsigned cur_method;
        unsigned seen_methods;

        struct wrk_vpi *vpi;
};

/* Stored object -----------------------------------------------------
 * This is just to encapsulate the fields owned by the stevedore
 */

struct storeobj {
        const struct stevedore *stevedore;
        void *priv;
        uint64_t priv2;
};

/* Busy Objcore structure --------------------------------------------
 *
 */

/*
 * The macro-states we expose outside the fetch code
 */
enum boc_state_e {
#define BOC_STATE(U, l) BOS_##U,
#include "tbl/boc_state.h"
};

// cache_obj.h vai notify
struct vai_qe;
VSLIST_HEAD(vai_q_head, vai_qe);

struct boc {
        unsigned magic;
#define BOC_MAGIC 0x70c98476
        unsigned refcount;
        struct lock mtx;
        pthread_cond_t cond;
        void *stevedore_priv;
        enum boc_state_e state;
        uint8_t *vary;
        uint64_t fetched_so_far;
        uint64_t delivered_so_far;
        uint64_t transit_buffer;
        struct vai_q_head vai_q_head;
};

/* Object core structure ---------------------------------------------
 * Objects have sideways references in the binary heap and the LRU list
 * and we want to avoid paging in a lot of objects just to move them up
 * or down the binheap or to move an unrelated object on the LRU list.
 * To avoid this we use a proxy object, objcore, to hold the relevant
 * housekeeping fields of an object.
 */

enum obj_attr {
#define OBJ_FIXATTR(U, l, s) OA_##U,
#define OBJ_VARATTR(U, l) OA_##U,
#define OBJ_AUXATTR(U, l) OA_##U,
#include "tbl/obj_attr.h"
        OA__MAX,
};

enum obj_flags {
#define OBJ_FLAG(U, l, v) OF_##U = v,
#include "tbl/obj_attr.h"
};

enum oc_flags {
#define OC_FLAG(U, l, v) OC_F_##U = v,
#include "tbl/oc_flags.h"
};

#define OC_F_TRANSIENT (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP)

enum oc_exp_flags {
#define OC_EXP_FLAG(U, l, v) OC_EF_##U = v,
#include "tbl/oc_exp_flags.h"
};

struct objcore {
        unsigned magic;
#define OBJCORE_MAGIC 0x4d301302
        int refcnt;
        struct storeobj stobj[1];
        struct objhead *objhead;
        struct boc *boc;
        vtim_real timer_when;
        VCL_INT hits;


        vtim_real t_origin;
        float ttl;
        float grace;
        float keep;

        uint8_t flags;

        uint8_t exp_flags;

        uint16_t oa_present;

        unsigned timer_idx;     // XXX 4Gobj limit
        vtim_real last_lru;
        VTAILQ_ENTRY(objcore) hsh_list;
        VTAILQ_ENTRY(objcore) lru_list;
        VTAILQ_ENTRY(objcore) ban_list;
        VSTAILQ_ENTRY(objcore) exp_list;
        struct ban *ban;
};

/* Busy Object structure ---------------------------------------------
 *
 * The busyobj structure captures the aspects of an object that are
 * relevant while it is being fetched from the backend.
 *
 * One of these aspects is how much has been fetched so far, which
 * streaming delivery makes use of.
 */

enum director_state_e {
        DIR_S_NULL = 0,
        DIR_S_HDRS = 1,
        DIR_S_BODY = 2,
};

struct busyobj {
        unsigned magic;
#define BUSYOBJ_MAGIC 0x23b95567

        char *end;

        unsigned max_retries;
        unsigned retries;
        struct req *req;
        struct sess *sp;
        struct worker *wrk;

        /* beresp.body */
        struct vfp_ctx *vfc;
        const char *vfp_filter_list;
        /* bereq.body */
        const char *vdp_filter_list;

        struct ws ws[1];
        uintptr_t ws_bo;
        struct http *bereq0;
        struct http *bereq;
        struct http *beresp;
        struct objcore *bereq_body;
        struct objcore *stale_oc;
        struct objcore *fetch_objcore;

        const char *no_retry;

        struct http_conn *htc;

        struct pool_task fetch_task[1];

        const char *err_reason;
        enum director_state_e director_state;
        uint16_t err_code;

#define BERESP_FLAG(l, r, w, f, d) unsigned l:1;
#define BEREQ_FLAG(l, r, w, d) BERESP_FLAG(l, r, w, 0, d)
#include "tbl/bereq_flags.h"
#include "tbl/beresp_flags.h"


        /* Timeouts */
        vtim_dur connect_timeout;
        vtim_dur first_byte_timeout;
        vtim_dur between_bytes_timeout;
        vtim_dur task_deadline;

        /* Timers */
        vtim_real t_first;      /* First timestamp logged */
        vtim_real t_resp;       /* response received */
        vtim_real t_prev;       /* Previous timestamp logged */

        /* Acct */
        struct acct_bereq acct;

        const struct stevedore *storage;
        const struct director *director_req;
        const struct director *director_resp;
        struct vcl *vcl;

        struct vsl_log vsl[1];

        uint8_t digest[DIGEST_LEN];
        struct vrt_privs privs[1];

        const char *client_identity;
};

#define BUSYOBJ_TMO(bo, pfx, tmo) \
        (isnan((bo)->tmo) ? cache_param->pfx##tmo : (bo)->tmo)
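
/*
 * Usage sketch (illustrative only): with an empty pfx the macro falls back
 * to the identically named parameter, e.g.
 *
 *        vtim_dur d = BUSYOBJ_TMO(bo, , connect_timeout);
 *        // d is bo->connect_timeout, or cache_param->connect_timeout
 *        // if the per-busyobj value is unset (NaN)
 */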


/*--------------------------------------------------------------------*/

struct reqtop {
        unsigned magic;
#define REQTOP_MAGIC 0x57fbda52
        struct req *topreq;
        struct vcl *vcl0;
        struct vrt_privs privs[1];
};

struct req {
        unsigned magic;
#define REQ_MAGIC 0xfb4abf6d

        unsigned esi_level;
        body_status_t req_body_status;
        stream_close_t doclose;
        unsigned restarts;
        unsigned max_restarts;

        const struct req_step *req_step;
        struct reqtop *top;     /* esi_level == 0 request */

        uint16_t err_code;
#define REQ_FLAG(l, r, w, d) unsigned l:1;
#include "tbl/req_flags.h"

        const char *err_reason;

        struct sess *sp;
        struct worker *wrk;
        struct pool_task task[1];

        const struct transport *transport;
        void *transport_priv;

        VTAILQ_ENTRY(req) w_list;

        struct objcore *body_oc;

        /* The busy objhead we sleep on */
        struct objhead *hash_objhead;

        /* Built Vary string == workspace reservation */
        uint8_t *vary_b;
        uint8_t *vary_e;

        uint8_t digest[DIGEST_LEN];

        vtim_dur d_ttl;
        vtim_dur d_grace;

        const struct stevedore *storage;

        const struct director *director_hint;
        struct vcl *vcl;

        uintptr_t ws_req;       /* WS above request data */

        /* Timestamps */
        vtim_real t_first;      /* First timestamp logged */
        vtim_real t_prev;       /* Previous timestamp logged */
        vtim_real t_req;        /* Headers complete */
        vtim_real t_resp;       /* Entry to last deliver/synth */

        struct http_conn *htc;
        struct vfp_ctx *vfc;
        const char *client_identity;

        /* HTTP request */
        struct http *http;
        struct http *http0;

        /* HTTP response */
        struct http *resp;
        intmax_t resp_len;

        struct ws ws[1];
        struct objcore *objcore;
        struct objcore *stale_oc;
        struct boc *boc;        /* valid during cnt_transmit */

        /* resp.body */
        struct vdp_ctx *vdc;
        const char *vdp_filter_list;
        /* req.body */
        const char *vfp_filter_list;

        /* Transaction VSL buffer */
        struct vsl_log vsl[1];

        /* Temporary accounting */
        struct acct_req acct;

        struct vrt_privs privs[1];

        struct vcf *vcf;
};

#define IS_TOPREQ(req) ((req)->top->topreq == (req))

/*--------------------------------------------------------------------
 * Struct sess is a high memory-load structure because sessions typically
 * hang around the waiter for a relatively long time.
 *
 * The size goal for struct sess + struct memitem is <512 bytes
 *
 * Getting down to the next relevant size (<256 bytes, because of how
 * malloc works) is not realistic without a lot of code changes.
 */

enum sess_attr {
#define SESS_ATTR(UP, low, typ, len) SA_##UP,
#include "tbl/sess_attr.h"
        SA_LAST
};

struct sess {
        unsigned magic;
#define SESS_MAGIC 0x2c2f9c5a

        uint16_t sattr[SA_LAST];
        struct listen_sock *listen_sock;
        int refcnt;
        int fd;
        vxid_t vxid;

        struct lock mtx;

        struct pool *pool;

        struct ws ws[1];

        vtim_real t_open;       /* fd accepted */
        vtim_real t_idle;       /* fd accepted or resp sent */
        vtim_dur timeout_idle;
        vtim_dur timeout_linger;
        vtim_dur send_timeout;
        vtim_dur idle_send_timeout;
};

#define SESS_TMO(sp, tmo) \
        (isnan((sp)->tmo) ? cache_param->tmo : (sp)->tmo)
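
/*
 * Usage sketch, analogous to BUSYOBJ_TMO() above (illustrative only):
 *
 *        vtim_dur d = SESS_TMO(sp, timeout_idle);
 *        // d is sp->timeout_idle, or cache_param->timeout_idle
 *        // if the per-session value is unset (NaN)
 */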

/* Prototypes etc ----------------------------------------------------*/


/* cache_ban.c */

/* for constructing bans */
struct ban_proto *BAN_Build(void);
const char *BAN_AddTest(struct ban_proto *,
    const char *, const char *, const char *);
const char *BAN_Commit(struct ban_proto *b);
void BAN_Abandon(struct ban_proto *b);

/* cache_cli.c [CLI] */
extern pthread_t cli_thread;
#define IS_CLI() (pthread_equal(pthread_self(), cli_thread))
#define ASSERT_CLI() do {assert(IS_CLI());} while (0)

/* cache_http.c */
unsigned HTTP_estimate(unsigned nhttp);
void HTTP_Clone(struct http *to, const struct http * const fm);
void HTTP_Dup(struct http *to, const struct http * const fm);
struct http *HTTP_create(void *p, uint16_t nhttp, unsigned);
const char *http_Status2Reason(unsigned, const char **);
int http_IsHdr(const txt *hh, hdr_t hdr);
unsigned http_EstimateWS(const struct http *fm, unsigned how);
void http_PutResponse(struct http *to, const char *proto, uint16_t status,
    const char *response);
void http_FilterReq(struct http *to, const struct http *fm, unsigned how);
void HTTP_Encode(const struct http *fm, uint8_t *, unsigned len, unsigned how);
int HTTP_Decode(struct http *to, const uint8_t *fm);
void http_ForceHeader(struct http *to, hdr_t, const char *val);
void http_AppendHeader(struct http *to, hdr_t, const char *val);
void http_PrintfHeader(struct http *to, const char *fmt, ...)
    v_printflike_(2, 3);
void http_TimeHeader(struct http *to, const char *fmt, vtim_real now);
const char * http_ViaHeader(void);
void http_Proto(struct http *to);
void http_SetHeader(struct http *to, const char *header);
void http_SetH(struct http *to, unsigned n, const char *header);
void http_ForceField(struct http *to, unsigned n, const char *t);
void HTTP_Setup(struct http *, struct ws *, struct vsl_log *, enum VSL_tag_e);
void http_Teardown(struct http *ht);
int http_GetHdr(const struct http *hp, hdr_t, const char **ptr);
int http_GetHdrToken(const struct http *hp, hdr_t,
    const char *token, const char **pb, const char **pe);
int http_GetHdrField(const struct http *hp, hdr_t,
    const char *field, const char **ptr);
double http_GetHdrQ(const struct http *hp, hdr_t, const char *field);
ssize_t http_GetContentLength(const struct http *hp);
ssize_t http_GetContentRange(const struct http *hp, ssize_t *lo, ssize_t *hi);
const char * http_GetRange(const struct http *hp, ssize_t *lo, ssize_t *hi,
    ssize_t len);
uint16_t http_GetStatus(const struct http *hp);
int http_IsStatus(const struct http *hp, int);
void http_SetStatus(struct http *to, uint16_t status, const char *reason);
const char *http_GetMethod(const struct http *hp);
int http_HdrIs(const struct http *hp, hdr_t, const char *val);
void http_CopyHome(const struct http *hp);
void http_Unset(struct http *hp, hdr_t);
unsigned http_CountHdr(const struct http *hp, hdr_t);
void http_CollectHdr(struct http *hp, hdr_t);
void http_CollectHdrSep(struct http *hp, hdr_t, const char *sep);
void http_VSL_log(const struct http *hp);
void HTTP_Merge(struct worker *, struct objcore *, struct http *to);
uint16_t HTTP_GetStatusPack(struct worker *, struct objcore *oc);
int HTTP_IterHdrPack(struct worker *, struct objcore *, const char **);
#define HTTP_FOREACH_PACK(wrk, oc, ptr) \
        for ((ptr) = NULL; HTTP_IterHdrPack(wrk, oc, &(ptr));)
const char *HTTP_GetHdrPack(struct worker *, struct objcore *, hdr_t);
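
/*
 * Iteration sketch for the packed-header accessors above (illustrative
 * only; each iteration yields one string from the packed http object):
 *
 *        const char *p;
 *        HTTP_FOREACH_PACK(wrk, oc, p) {
 *                ... inspect the string p ...
 *        }
 */
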
stream_close_t http_DoConnection(struct http *hp, stream_close_t sc_close);
int http_IsFiltered(const struct http *hp, unsigned u, unsigned how);

#define HTTPH_R_PASS     (1 << 0)  /* Request (c->b) in pass mode */
#define HTTPH_R_FETCH    (1 << 1)  /* Request (c->b) for fetch */
#define HTTPH_A_INS      (1 << 2)  /* Response (b->o) for insert */
#define HTTPH_A_PASS     (1 << 3)  /* Response (b->o) for pass */
#define HTTPH_C_SPECIFIC (1 << 4)  /* Connection-specific */

#define HTTPH(a, b, c) extern hdr_t b;
#include "tbl/http_headers.h"

extern hdr_t H__Status;
extern hdr_t H__Proto;
extern hdr_t H__Reason;

// rfc7233,l,1207,1208
#define http_tok_eq(s1, s2) (!vct_casecmp(s1, s2))
#define http_tok_at(s1, s2, l) (!vct_caselencmp(s1, s2, l))
#define http_ctok_at(s, cs) (!vct_caselencmp(s, cs, sizeof(cs) - 1))

// rfc7230,l,1037,1038
#define http_scheme_at(str, tok) http_ctok_at(str, #tok "://")

// rfc7230,l,1144,1144
// rfc7231,l,1156,1158
#define http_method_eq(str, tok) (!strcmp(str, #tok))

// rfc7230,l,1222,1222
// rfc7230,l,2848,2848
// rfc7231,l,3883,3885
// rfc7234,l,1339,1340
// rfc7234,l,1418,1419
#define http_hdr_eq(s1, s2) http_tok_eq(s1, s2)
#define http_hdr_at(s1, s2, l) http_tok_at(s1, s2, l)

// rfc7230,l,1952,1952
// rfc7231,l,604,604
#define http_coding_eq(str, tok) http_tok_eq(str, #tok)

// rfc7231,l,1864,1864
#define http_expect_eq(str, tok) http_tok_eq(str, #tok)

// rfc7233,l,1207,1208
#define http_range_at(str, tok, l) http_tok_at(str, #tok, l)
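
/*
 * Comparison sketches for the macros above (illustrative; `val' is a
 * hypothetical const char *):
 *
 *        http_method_eq(http_GetMethod(req->http), GET)  // exact, case-sensitive
 *        http_coding_eq(val, gzip)                       // case-insensitive token match
 *        http_hdr_eq(val, "Connection")                  // case-insensitive header name match
 */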

/* cache_lck.c */

/* Internal functions, call only through macros below */
void Lck__Lock(struct lock *lck, const char *p, int l);
void Lck__Unlock(struct lock *lck, const char *p, int l);
int Lck__Trylock(struct lock *lck, const char *p, int l);
void Lck__New(struct lock *lck, struct VSC_lck *, const char *);
int Lck__Held(const struct lock *lck);
int Lck__Owned(const struct lock *lck);
extern pthread_mutexattr_t mtxattr_errorcheck;

/* public interface: */
void Lck_Delete(struct lock *lck);
int Lck_CondWaitUntil(pthread_cond_t *, struct lock *, vtim_real when);
int Lck_CondWait(pthread_cond_t *, struct lock *);
int Lck_CondWaitTimeout(pthread_cond_t *, struct lock *, vtim_dur timeout);

#define Lck_New(a, b) Lck__New(a, b, #b)
#define Lck_Lock(a) Lck__Lock(a, __func__, __LINE__)
#define Lck_Unlock(a) Lck__Unlock(a, __func__, __LINE__)
#define Lck_Trylock(a) Lck__Trylock(a, __func__, __LINE__)
#define Lck_AssertHeld(a) \
        do { \
                assert(Lck__Held(a)); \
                assert(Lck__Owned(a)); \
        } while (0)
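
/*
 * Typical locking pattern with the wrappers above (sketch; sp stands for
 * any holder of a struct lock, e.g. a struct sess):
 *
 *        Lck_Lock(&sp->mtx);
 *        Lck_AssertHeld(&sp->mtx);
 *        ... critical section ...
 *        Lck_Unlock(&sp->mtx);
 */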

struct VSC_lck *Lck_CreateClass(struct vsc_seg **, const char *);
void Lck_DestroyClass(struct vsc_seg **);

#define LOCK(nam) extern struct VSC_lck *lck_##nam;
#include "tbl/locks.h"

/* cache_obj.c */

int ObjHasAttr(struct worker *, struct objcore *, enum obj_attr);
const void *ObjGetAttr(struct worker *, struct objcore *, enum obj_attr,
    ssize_t *len);

typedef int objiterate_f(void *priv, unsigned flush,
    const void *ptr, ssize_t len);
#define OBJ_ITER_FLUSH 0x01
#define OBJ_ITER_END 0x02

int ObjIterate(struct worker *, struct objcore *,
    void *priv, objiterate_f *func, int final);
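
/*
 * Callback sketch (illustrative; `deliver_part' is hypothetical, and the
 * non-zero-return-aborts convention is assumed here):
 *
 *        static int
 *        deliver_part(void *priv, unsigned flush, const void *ptr, ssize_t len)
 *        {
 *                // write len bytes from ptr; flush may carry OBJ_ITER_FLUSH
 *                // and/or OBJ_ITER_END
 *                return (0);     // non-zero would abort the iteration
 *        }
 *
 *        ObjIterate(wrk, oc, priv, deliver_part, 0);
 */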

vxid_t ObjGetXID(struct worker *, struct objcore *);
uint64_t ObjGetLen(struct worker *, struct objcore *);
int ObjGetDouble(struct worker *, struct objcore *, enum obj_attr, double *);
int ObjGetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t *);
int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of);

/*====================================================================
 * ObjVAI...(): Asynchronous Iteration
 *
 * see comments in cache_obj.c for usage
 */

typedef void *vai_hdl;
typedef void vai_notify_cb(vai_hdl, void *priv);


/*
 * VSCARAB: Varnish SCatter ARAy of Buffers:
 *
 * an array of viovs, elsewhere also called an siov or sarray
 */
struct viov {
        uint64_t lease;
        struct iovec iov;
};

struct vscarab {
        unsigned magic;
#define VSCARAB_MAGIC 0x05ca7ab0
        unsigned flags;
#define VSCARAB_F_END 1 // last viov is last overall
        unsigned capacity;
        unsigned used;
        struct viov s[] v_counted_by_(capacity);
};

// VFLA: starting generic container-with-flexible-array-member macros
// aka "struct hack"
//
// type : struct name
// name : a pointer to struct type
// mag : the magic value for this VFLA
// cptr : pointer to container struct (aka "head")
// fam : member name of the flexible array member
// cap : capacity
//
// common properties of all VFLAs:
// - are a miniobj (have magic as the first element)
// - capacity member is the fam capacity
// - used member is the number of fam elements used
//
// VFLA_SIZE ignores the cap == 0 case, we assert in _INIT
// offsetof ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/
//lint -emacro(413, VFLA_SIZE)
//lint -emacro(545, VFLA_SIZE) bsd offsetof() seems to be using &
#define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \
        (cap) * sizeof(((struct type *)0)->fam[0]))
#define VFLA_INIT_(type, cptr, mag, fam, cap, save) do { \
        unsigned save = (cap); \
        AN(save); \
        memset((cptr), 0, VFLA_SIZE(type, fam, save)); \
        (cptr)->magic = (mag); \
        (cptr)->capacity = (save); \
} while (0)
#define VFLA_INIT(type, cptr, mag, fam, cap) \
        VFLA_INIT_(type, cptr, mag, fam, cap, VUNIQ_NAME(save))
// declare, allocate and initialize a local VFLA
// the additional VLA buf declaration avoids
// "Variable-sized object may not be initialized"
#define VFLA_LOCAL_(type, name, mag, fam, cap, bufname) \
        char bufname[VFLA_SIZE(type, fam, cap)]; \
        struct type *name = (void *)bufname; \
        VFLA_INIT(type, name, mag, fam, cap)
#define VFLA_LOCAL(type, name, mag, fam, cap) \
        VFLA_LOCAL_(type, name, mag, fam, cap, VUNIQ_NAME(buf))
// malloc and initialize a VFLA
#define VFLA_ALLOC(type, name, mag, fam, cap) do { \
        (name) = malloc(VFLA_SIZE(type, fam, cap)); \
        if ((name) != NULL) \
                VFLA_INIT(type, name, mag, fam, cap); \
} while(0)
#define VFLA_FOREACH(var, cptr, fam) \
        for (var = &(cptr)->fam[0]; var < &(cptr)->fam[(cptr)->used]; var++)
// continue iterating after a break of a _FOREACH
#define VFLA_FOREACH_RESUME(var, cptr, fam) \
        for (; var != NULL && var < &(cptr)->fam[(cptr)->used]; var++)
#define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ? &(cptr)->fam[(cptr)->used++] : NULL)
// asserts sufficient capacity
#define VFLA_ADD(cptr, fam, val) do { \
        assert((cptr)->used < (cptr)->capacity); \
        (cptr)->fam[(cptr)->used++] = (val); \
} while(0)

#define VSCARAB_SIZE(cap) VFLA_SIZE(vscarab, s, cap)
#define VSCARAB_INIT(scarab, cap) VFLA_INIT(vscarab, scarab, VSCARAB_MAGIC, s, cap)
#define VSCARAB_LOCAL(scarab, cap) VFLA_LOCAL(vscarab, scarab, VSCARAB_MAGIC, s, cap)
#define VSCARAB_ALLOC(scarab, cap) VFLA_ALLOC(vscarab, scarab, VSCARAB_MAGIC, s, cap)
#define VSCARAB_FOREACH(var, scarab) VFLA_FOREACH(var, scarab, s)
#define VSCARAB_FOREACH_RESUME(var, scarab) VFLA_FOREACH_RESUME(var, scarab, s)
#define VSCARAB_GET(scarab) VFLA_GET(scarab, s)
#define VSCARAB_ADD(scarab, val) VFLA_ADD(scarab, s, val)
//lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug?
#define VSCARAB_ADD_IOV_NORET(scarab, vec) \
        VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)}))
#define VSCARAB_LAST(scarab) (&(scarab)->s[(scarab)->used - 1])

#define VSCARAB_CHECK(scarab) do { \
        CHECK_OBJ(scarab, VSCARAB_MAGIC); \
        assert(scarab->used <= scarab->capacity); \
} while(0)

#define VSCARAB_CHECK_NOTNULL(scarab) do { \
        AN(scarab); \
        VSCARAB_CHECK(scarab); \
} while(0)
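
/*
 * VSCARAB usage sketch (illustrative; `buf' and `sz' are hypothetical):
 *
 *        VSCARAB_LOCAL(scarab, 4);       // on-stack vscarab, capacity 4
 *        struct iovec v;
 *        v.iov_base = buf;
 *        v.iov_len = sz;
 *        VSCARAB_ADD_IOV_NORET(scarab, v);  // nothing to return for this extent
 *
 *        struct viov *vio;
 *        VSCARAB_FOREACH(vio, scarab)
 *                ... consume vio->iov, remember vio->lease ...
 */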

/*
 * VSCARET: Varnish SCatter Array Return
 *
 * an array of leases obtained from a vscarab
 */

struct vscaret {
        unsigned magic;
#define VSCARET_MAGIC 0x9c1f3d7b
        unsigned capacity;
        unsigned used;
        uint64_t lease[] v_counted_by_(capacity);
};

#define VSCARET_SIZE(cap) VFLA_SIZE(vscaret, lease, cap)
#define VSCARET_INIT(scaret, cap) VFLA_INIT(vscaret, scaret, VSCARET_MAGIC, lease, cap)
#define VSCARET_LOCAL(scaret, cap) VFLA_LOCAL(vscaret, scaret, VSCARET_MAGIC, lease, cap)
#define VSCARET_ALLOC(scaret, cap) VFLA_ALLOC(vscaret, scaret, VSCARET_MAGIC, lease, cap)
#define VSCARET_FOREACH(var, scaret) VFLA_FOREACH(var, scaret, lease)
#define VSCARET_GET(scaret) VFLA_GET(scaret, lease)
#define VSCARET_ADD(scaret, val) VFLA_ADD(scaret, lease, val)

#define VSCARET_CHECK(scaret) do { \
        CHECK_OBJ(scaret, VSCARET_MAGIC); \
        assert(scaret->used <= scaret->capacity); \
} while(0)

#define VSCARET_CHECK_NOTNULL(scaret) do { \
        AN(scaret); \
        VSCARET_CHECK(scaret); \
} while(0)

/*
 * VSCARABs can contain leases which are not to be returned to storage, for
 * example static data or fragments of larger leases to be returned later. For
 * these cases, use this magic value as the lease. This is deliberately not 0 to
 * catch oversights.
 */
#define VAI_LEASE_NORET ((uint64_t)0x8)

vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *,
    vai_notify_cb *, void *);
int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *);
int ObjVAIbuffer(struct worker *, vai_hdl, struct vscarab *);
void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *);
void ObjVAIfini(struct worker *, vai_hdl *);
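
/*
 * Rough call-sequence sketch, pieced together from the declarations above
 * (notify_cb and priv are hypothetical caller-supplied values); the comments
 * in cache_obj.c are the authoritative reference:
 *
 *        vai_hdl hdl = ObjVAIinit(wrk, oc, ws, notify_cb, priv);
 *        VSCARAB_LOCAL(scarab, 8);
 *        (void)ObjVAIlease(wrk, hdl, scarab);    // obtain leased extents
 *        ... deliver the viovs ...
 *        VSCARET_LOCAL(scaret, 8);
 *        ... VSCARET_ADD() the leases to hand back (VAI_LEASE_NORET excepted) ...
 *        ObjVAIreturn(wrk, hdl, scaret);
 *        ObjVAIfini(wrk, &hdl);
 */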

/* cache_req_body.c */
ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *,
    objiterate_f *func, void *priv);

/* cache_session.c [SES] */

#define SESS_ATTR(UP, low, typ, len) \
        int SES_Get_##low(const struct sess *sp, typ **dst);
#include "tbl/sess_attr.h"
const char *SES_Get_String_Attr(const struct sess *sp, enum sess_attr a);

/* cache_shmlog.c */
void VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list va);
void VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s);
void VSLbv(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, va_list va);
void VSLb(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLbt(struct vsl_log *, enum VSL_tag_e tag, txt t);
void VSLbs(struct vsl_log *, enum VSL_tag_e tag, const struct strands *s);
void VSLb_ts(struct vsl_log *, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now);
void VSLb_bin(struct vsl_log *, enum VSL_tag_e, ssize_t, const void*);
int VSL_tag_is_masked(enum VSL_tag_e tag);

static inline void
VSLb_ts_req(struct req *req, const char *event, vtim_real now)
{

        if (isnan(req->t_first) || req->t_first == 0.)
                req->t_first = req->t_prev = now;
        VSLb_ts(req->vsl, event, req->t_first, &req->t_prev, now);
}

static inline void
VSLb_ts_busyobj(struct busyobj *bo, const char *event, vtim_real now)
{

        if (isnan(bo->t_first) || bo->t_first == 0.)
                bo->t_first = bo->t_prev = now;
        VSLb_ts(bo->vsl, event, bo->t_first, &bo->t_prev, now);
}

/* cache_vcl.c */
const char *VCL_Name(const struct vcl *);

/* cache_wrk.c */

typedef void *bgthread_t(struct worker *, void *priv);
void WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func,
    void *priv);

/* cache_ws.c */
void WS_Init(struct ws *ws, const char *id, void *space, unsigned len);

unsigned WS_ReserveSize(struct ws *, unsigned);
unsigned WS_ReserveAll(struct ws *);
void WS_Release(struct ws *ws, unsigned bytes);
void WS_ReleaseP(struct ws *ws, const char *ptr);
void WS_Assert(const struct ws *ws);
void WS_Reset(struct ws *ws, uintptr_t);
void *WS_Alloc(struct ws *ws, unsigned bytes);
void *WS_Copy(struct ws *ws, const void *str, int len);
uintptr_t WS_Snapshot(struct ws *ws);
int WS_Allocated(const struct ws *ws, const void *ptr, ssize_t len);
unsigned WS_Dump(const struct ws *ws, char, size_t off, void *buf, size_t len);

static inline void *
WS_Reservation(const struct ws *ws)
{

        WS_Assert(ws);
        AN(ws->r);
        AN(ws->f);
        return (ws->f);
}

static inline unsigned
WS_ReservationSize(const struct ws *ws)
{

        AN(ws->r);
        return (ws->r - ws->f);
}

static inline unsigned
WS_ReserveLumps(struct ws *ws, size_t sz)
{

        AN(sz);
        return (WS_ReserveAll(ws) / sz);
}
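
/*
 * Reservation sketch using the functions above (illustrative; `used' is
 * however many bytes were actually written):
 *
 *        unsigned sz = WS_ReserveAll(ws);
 *        char *p = WS_Reservation(ws);
 *        ... write at most sz bytes at p ...
 *        WS_Release(ws, used);           // keep only what was written
 */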

/* cache_ws_common.c */
void WS_MarkOverflow(struct ws *ws);
int WS_Overflowed(const struct ws *ws);

const char *WS_Printf(struct ws *ws, const char *fmt, ...) v_printflike_(2, 3);

void WS_VSB_new(struct vsb *, struct ws *);
char *WS_VSB_finish(struct vsb *, struct ws *, size_t *);

/* WS utility */
#define WS_TASK_ALLOC_OBJ(ctx, ptr, magic) do { \
        ptr = WS_Alloc((ctx)->ws, sizeof *(ptr)); \
        if ((ptr) == NULL) \
                VRT_fail(ctx, "Out of workspace for " #magic); \
        else \
                INIT_OBJ(ptr, magic); \
} while(0)
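
/*
 * Usage sketch (illustrative; `struct foo' and FOO_MAGIC are hypothetical):
 *
 *        struct foo *fp;
 *        WS_TASK_ALLOC_OBJ(ctx, fp, FOO_MAGIC);
 *        if (fp == NULL)
 *                return;         // VRT_fail() has already been called
 */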

/* cache_rfc2616.c */
void RFC2616_Ttl(struct busyobj *, vtim_real now, vtim_real *t_origin,
    float *ttl, float *grace, float *keep);
unsigned RFC2616_Req_Gzip(const struct http *);
int RFC2616_Do_Cond(const struct req *sp);
void RFC2616_Weaken_Etag(struct http *hp);
void RFC2616_Vary_AE(struct http *hp);
const char * RFC2616_Strong_LM(const struct http *hp, struct worker *wrk,
    struct objcore *oc);

/*
 * We want to cache the most recent timestamp in wrk->lastused to avoid
 * extra timestamps in cache_pool.c. Hide this detail with a macro
 */
#define W_TIM_real(w) ((w)->lastused = VTIM_real())