/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Stuff that should *never* be exposed to a VMOD
 */

#include "cache.h"

#include "vsb.h"

#include <sys/socket.h>

#include <string.h>
#include <limits.h>
#include <unistd.h>

#include "common/common_param.h"

#ifdef NOT_IN_A_VMOD
#  include "VSC_main.h"
#endif

/*--------------------------------------------------------------------*/

struct vfp;
struct vdp;
struct cli_proto;
struct poolparam;

/*--------------------------------------------------------------------*/

typedef enum req_fsm_nxt req_state_f(struct worker *, struct req *);
struct req_step {
    const char      *name;
    req_state_f     *func;
};

extern const struct req_step R_STP_TRANSPORT[1];
extern const struct req_step R_STP_RECV[1];

struct vxid_pool {
    uint64_t        next;
    uint32_t        count;
};

/*--------------------------------------------------------------------
 * Private part of worker threads
 */

struct worker_priv {
    unsigned            magic;
#define WORKER_PRIV_MAGIC   0x3047db99
    struct objhead      *nobjhead;
    struct objcore      *nobjcore;
    void                *nhashpriv;
    struct vxid_pool    vxid_pool[1];
    struct vcl          *vcl;
};

/*--------------------------------------------------------------------
 * HTTP Protocol connection structure
 *
 * This is the protocol-independent object for an HTTP connection, used
 * both for backend and client sides.
 *
 */

struct http_conn {
    unsigned            magic;
#define HTTP_CONN_MAGIC     0x3e19edd1

    int                 *rfd;
    stream_close_t      doclose;
    body_status_t       body_status;
    struct ws           *ws;
    char                *rxbuf_b;
    char                *rxbuf_e;
    char                *pipeline_b;
    char                *pipeline_e;
    ssize_t             content_length;
    void                *priv;

    /* Timeouts */
    vtim_dur            first_byte_timeout;
    vtim_dur            between_bytes_timeout;
};

enum htc_status_e {
#define HTC_STATUS(e, n, s, l)  HTC_S_ ## e = n,
#include "tbl/htc.h"
};

typedef enum htc_status_e htc_complete_f(struct http_conn *);
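
/*
 * The enum above is built with an X-macro: every HTC_STATUS(e, n, s, l)
 * row in tbl/htc.h becomes an enumerator HTC_S_<e> with value n (a
 * COMPLETE row yields HTC_S_COMPLETE, and so on).  This is only a reading
 * of the expansion; tbl/htc.h is the authoritative list of states.
 */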

/* -------------------------------------------------------------------*/

extern volatile struct params * cache_param;

/* -------------------------------------------------------------------
 * The VCF facility is deliberately undocumented, use at your peril.
 */

struct vcf_return {
    const char      *name;
};

#define VCF_RETURNS() \
    VCF_RETURN(CONTINUE) \
    VCF_RETURN(DEFAULT) \
    VCF_RETURN(MISS) \
    VCF_RETURN(HIT)

#define VCF_RETURN(x) extern const struct vcf_return VCF_##x[1];
VCF_RETURNS()
#undef VCF_RETURN

typedef const struct vcf_return *vcf_func_f(
    struct req *req,
    struct objcore **oc,
    struct objcore **oc_exp,
    int state);

struct vcf {
    unsigned        magic;
#define VCF_MAGIC       0x183285d1
    vcf_func_f      *func;
    void            *priv;
};

/* Prototypes etc ----------------------------------------------------*/

/* cache_backend.c */
struct backend;

/* cache_backend_cfg.c */
void VBE_InitCfg(void);

/* cache_ban.c */

/* for stevedores resurrecting bans */
void BAN_Hold(void);
void BAN_Release(void);
void BAN_Reload(const uint8_t *ban, unsigned len);
struct ban *BAN_FindBan(vtim_real t0);
void BAN_RefBan(struct objcore *oc, struct ban *);
vtim_real BAN_Time(const struct ban *ban);

/* cache_busyobj.c */
struct busyobj *VBO_GetBusyObj(const struct worker *, const struct req *);
void VBO_ReleaseBusyObj(struct worker *wrk, struct busyobj **busyobj);

/* cache_director.c */
int VDI_GetHdr(struct busyobj *);
VCL_IP VDI_GetIP(struct busyobj *);
void VDI_Finish(struct busyobj *bo);
stream_close_t VDI_Http1Pipe(struct req *, struct busyobj *);
void VDI_Panic(const struct director *, struct vsb *, const char *nm);
void VDI_Event(const struct director *d, enum vcl_event_e ev);
void VDI_Init(void);

/* cache_deliver_proc.c */
void VDP_Fini(const struct vdp_ctx *vdc);
void VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl,
    const struct req *req, const struct busyobj *bo, intmax_t *cl);
uint64_t VDP_Close(struct vdp_ctx *, struct objcore *, struct boc *);
void VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc);
int VDP_Push(VRT_CTX, struct vdp_ctx *, struct ws *, const struct vdp *,
    void *priv);
int VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len);
int VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc);
extern const struct vdp VDP_gunzip;
extern const struct vdp VDP_esi;
extern const struct vdp VDP_range;


/* cache_exp.c */
vtim_real EXP_Ttl(const struct req *, const struct objcore *);
vtim_real EXP_Ttl_grace(const struct req *, const struct objcore *oc);
void EXP_RefNewObjcore(struct objcore *);
void EXP_Insert(struct worker *wrk, struct objcore *oc);
void EXP_Remove(struct objcore *, const struct objcore *);

#define EXP_Dttl(req, oc) (oc->ttl - (req->t_req - oc->t_origin))

/* cache_expire.c */

/*
 * The set of variables which control object expiry are inconveniently
 * 24 bytes long (double+3*float) and this causes alignment waste if
 * we put them in a struct.
 * These three macros operate on the struct we don't use.
 */

#define EXP_ZERO(xx) \
    do { \
        (xx)->t_origin = 0.0; \
        (xx)->ttl = 0.0; \
        (xx)->grace = 0.0; \
        (xx)->keep = 0.0; \
    } while (0)

#define EXP_COPY(to,fm) \
    do { \
        (to)->t_origin = (fm)->t_origin; \
        (to)->ttl = (fm)->ttl; \
        (to)->grace = (fm)->grace; \
        (to)->keep = (fm)->keep; \
    } while (0)

#define EXP_WHEN(to) \
    ((to)->t_origin + (to)->ttl + (to)->grace + (to)->keep)
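
/*
 * Illustrative sketch of the macros above (not used anywhere in this
 * file): they work on any struct with t_origin, ttl, grace and keep
 * members, for instance an objcore:
 *
 *	EXP_COPY(oc, tmpoc);		// copy all four expiry fields
 *	if (EXP_WHEN(oc) < now)		// t_origin + ttl + grace + keep
 *		... the object has outlived its keep period ...
 *
 * "oc", "tmpoc" and "now" are hypothetical variables.
 */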

/* cache_exp.c */
void EXP_Rearm(struct objcore *oc, vtim_real now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep);
void EXP_Reduce(struct objcore *oc, vtim_real now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep);

/* From cache_main.c */
void BAN_Init(void);
void BAN_Compile(void);
void BAN_Shutdown(void);

/* From cache_hash.c */
void BAN_NewObjCore(struct objcore *oc);
void BAN_DestroyObj(struct objcore *oc);
int BAN_CheckObject(struct worker *, struct objcore *, struct req *);

/* cache_busyobj.c */
void VBO_Init(void);

/* cache_cli.c [CLI] */
void CLI_Init(void);
void CLI_Run(void);
void CLI_AddFuncs(struct cli_proto *p);

/* cache_expire.c */
void EXP_Init(void);
void EXP_Shutdown(void);

/* cache_fetch.c */
enum vbf_fetch_mode_e {
    VBF_NORMAL = 0,
    VBF_PASS = 1,
    VBF_BACKGROUND = 2,
};
void VBF_Fetch(struct worker *wrk, struct req *req,
    struct objcore *oc, struct objcore *oldoc, enum vbf_fetch_mode_e);
const char *VBF_Get_Filter_List(struct busyobj *);
void Bereq_Rollback(VRT_CTX);

/* cache_fetch_proc.c */
void VFP_Init(void);
struct vfp_entry *VFP_Push(struct vfp_ctx *, const struct vfp *);
enum vfp_status VFP_GetStorage(struct vfp_ctx *, ssize_t *sz, uint8_t **ptr);
void VFP_Extend(const struct vfp_ctx *, ssize_t sz, enum vfp_status);
void VFP_Setup(struct vfp_ctx *vc, struct worker *wrk);
int VFP_Open(VRT_CTX, struct vfp_ctx *);
uint64_t VFP_Close(struct vfp_ctx *);

extern const struct vfp VFP_gunzip;
extern const struct vfp VFP_gzip;
extern const struct vfp VFP_testgunzip;
extern const struct vfp VFP_esi;
extern const struct vfp VFP_esi_gzip;

/* cache_http.c */
void HTTP_Init(void);

/* cache_http1_proto.c */

htc_complete_f HTTP1_Complete;
uint16_t HTTP1_DissectRequest(struct http_conn *, struct http *);
uint16_t HTTP1_DissectResponse(struct http_conn *, struct http *resp,
    const struct http *req);
unsigned HTTP1_Write(const struct worker *w, const struct http *hp, const int*);

/* cache_main.c */
vxid_t VXID_Get(const struct worker *, uint64_t marker);
extern pthread_key_t panic_key;
extern pthread_key_t witness_key;

void THR_SetName(const char *name);
const char* THR_GetName(void);
void THR_SetBusyobj(const struct busyobj *);
struct busyobj * THR_GetBusyobj(void);
void THR_SetRequest(const struct req *);
struct req * THR_GetRequest(void);
void THR_SetWorker(const struct worker *);
struct worker * THR_GetWorker(void);
void THR_Init(void);

/* cache_lck.c */
void LCK_Init(void);

/* cache_mempool.c */
void MPL_AssertSane(const void *item);
struct mempool * MPL_New(const char *name, volatile struct poolparam *pp,
    volatile unsigned *cur_size);
void MPL_Destroy(struct mempool **mpp);
void *MPL_Get(struct mempool *mpl, unsigned *size);
void MPL_Free(struct mempool *mpl, void *item);

/* cache_obj.c */
void ObjInit(void);
struct objcore * ObjNew(const struct worker *);
void ObjDestroy(const struct worker *, struct objcore **);
int ObjGetSpace(struct worker *, struct objcore *, ssize_t *sz, uint8_t **ptr);
void ObjExtend(struct worker *, struct objcore *, ssize_t l, int final);
uint64_t ObjWaitExtend(const struct worker *, const struct objcore *,
    uint64_t l, enum boc_state_e *statep);
void ObjSetState(struct worker *, const struct objcore *,
    enum boc_state_e next);
void ObjWaitState(const struct objcore *, enum boc_state_e want);
void ObjTouch(struct worker *, struct objcore *, vtim_real now);
void ObjFreeObj(struct worker *, struct objcore *);
void ObjSlim(struct worker *, struct objcore *);
void *ObjSetAttr(struct worker *, struct objcore *, enum obj_attr,
    ssize_t len, const void *);
int ObjCopyAttr(struct worker *, struct objcore *, struct objcore *,
    enum obj_attr attr);
void ObjBocDone(struct worker *, struct objcore *, struct boc **);

int ObjSetDouble(struct worker *, struct objcore *, enum obj_attr, double);
int ObjSetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t);
int ObjSetXID(struct worker *, struct objcore *, vxid_t);

void ObjSetFlag(struct worker *, struct objcore *, enum obj_flags of, int val);

void ObjSendEvent(struct worker *, struct objcore *oc, unsigned event);

#define OEV_INSERT  (1U<<1)
#define OEV_BANCHG  (1U<<2)
#define OEV_TTLCHG  (1U<<3)
#define OEV_EXPIRE  (1U<<4)

#define OEV_MASK (OEV_INSERT|OEV_BANCHG|OEV_TTLCHG|OEV_EXPIRE)

typedef void obj_event_f(struct worker *, void *priv, struct objcore *,
    unsigned);

uintptr_t ObjSubscribeEvents(obj_event_f *, void *, unsigned mask);
void ObjUnsubscribeEvents(uintptr_t *);
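
/*
 * Sketch of the object-event interface above, based only on the
 * declarations (the callback and priv names are made up): a subscriber
 * registers an obj_event_f for the OEV_* bits it cares about and keeps
 * the returned handle so it can unsubscribe later:
 *
 *	static obj_event_f my_cb;	// my_cb(wrk, priv, oc, event)
 *	uintptr_t h;
 *
 *	h = ObjSubscribeEvents(my_cb, my_priv, OEV_INSERT|OEV_EXPIRE);
 *	...
 *	ObjUnsubscribeEvents(&h);
 */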

/* cache_panic.c */
void PAN_Init(void);
int PAN__DumpStruct(struct vsb *vsb, int block, int track, const void *ptr,
    const char *smagic, unsigned magic, const char *fmt, ...)
    v_printflike_(7,8);

#define PAN_CheckMagic(vsb, ptr, exp) \
    do { \
        if ((ptr)->magic != (exp)) \
            VSB_printf((vsb), \
                "MAGIC at %p is 0x%08x (Should be: %s/0x%08x)\n", \
                ptr, (ptr)->magic, #exp, exp); \
    } while(0)

#define PAN_dump_struct(vsb, ptr, magic, ...) \
    PAN__DumpStruct(vsb, 1, 1, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_oneline(vsb, ptr, magic, ...) \
    PAN__DumpStruct(vsb, 0, 1, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_once(vsb, ptr, magic, ...) \
    PAN__DumpStruct(vsb, 1, 0, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_once_oneline(vsb, ptr, magic, ...) \
    PAN__DumpStruct(vsb, 0, 0, ptr, #magic, magic, __VA_ARGS__)
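
/*
 * Reading the wrappers above mechanically: PAN_dump_struct() passes
 * block=1/track=1 to PAN__DumpStruct(), the _oneline variants pass
 * block=0 and the _once variants pass track=0; see cache_panic.c for
 * the exact semantics of those two flags.  A usage sketch of the magic
 * check, with "wp" a hypothetical struct worker_priv pointer:
 *
 *	PAN_CheckMagic(vsb, wp, WORKER_PRIV_MAGIC);
 */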

/* cache_pool.c */
void Pool_Init(void);
int Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio prio);
int Pool_Task_Arg(struct worker *, enum task_prio, task_func_t *,
    const void *arg, size_t arg_len);
void Pool_Sumstat(const struct worker *w);
int Pool_TrySumstat(const struct worker *wrk);
void Pool_PurgeStat(unsigned nobj);
int Pool_Task_Any(struct pool_task *task, enum task_prio prio);
void pan_pool(struct vsb *);

/* cache_range.c */
int VRG_CheckBo(struct busyobj *);

/* cache_req.c */
struct req *Req_New(struct sess *);
void Req_Release(struct req *);
void Req_Rollback(VRT_CTX);
void Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req);
void Req_Fail(struct req *req, stream_close_t reason);
void Req_AcctLogCharge(struct VSC_main_wrk *, struct req *);
void Req_LogHit(struct worker *, struct req *, struct objcore *, intmax_t);
const char *Req_LogStart(const struct worker *, struct req *);

/* cache_req_body.c */
int VRB_Ignore(struct req *);
ssize_t VRB_Cache(struct req *, ssize_t maxsize);
void VRB_Free(struct req *);

/* cache_req_fsm.c [CNT] */

int Resp_Setup_Deliver(struct req *);
void Resp_Setup_Synth(struct req *);

enum req_fsm_nxt {
    REQ_FSM_MORE,
    REQ_FSM_DONE,
    REQ_FSM_DISEMBARK,
};

void CNT_Embark(struct worker *, struct req *);
enum req_fsm_nxt CNT_Request(struct req *);

/* cache_session.c */
void SES_NewPool(struct pool *, unsigned pool_no);
void SES_DestroyPool(struct pool *);
void SES_Wait(struct sess *, const struct transport *);
void SES_Ref(struct sess *sp);
void SES_Rel(struct sess *sp);

void HTC_Status(enum htc_status_e, const char **, const char **);
void HTC_RxInit(struct http_conn *htc, struct ws *ws);
void HTC_RxPipeline(struct http_conn *htc, char *);
enum htc_status_e HTC_RxStuff(struct http_conn *, htc_complete_f *,
    vtim_real *t1, vtim_real *t2, vtim_real ti, vtim_real tn, vtim_dur td,
    int maxbytes);

#define SESS_ATTR(UP, low, typ, len) \
    int SES_Set_##low(const struct sess *sp, const typ *src); \
    int SES_Reserve_##low(struct sess *sp, typ **dst, ssize_t *sz);
#include "tbl/sess_attr.h"
int SES_Set_String_Attr(struct sess *sp, enum sess_attr a, const char *src);
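
/*
 * The SESS_ATTR() X-macro above expands every attribute row in
 * tbl/sess_attr.h into a pair of prototypes, SES_Set_<low>() and
 * SES_Reserve_<low>().  This is just a reading of the expansion; the
 * attribute names themselves come from the table file.
 */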

/* cache_shmlog.c */
extern struct VSC_main *VSC_C_main;
void VSM_Init(void);
void VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len);
void VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why,
    vxid_t vxid);
void VSL_End(struct vsl_log *vsl);
void VSL_Flush(struct vsl_log *, int overflow);

/* cache_conn_pool.c */
struct conn_pool;
void VCP_Init(void);
void VCP_Panic(struct vsb *, struct conn_pool *);

/* cache_backend_probe.c */
void VBP_Init(void);

/* cache_vary.c */
int VRY_Create(struct busyobj *bo, struct vsb **psb);
int VRY_Match(const struct req *, const uint8_t *vary);
void VRY_Prep(struct req *);
void VRY_Clear(struct req *);
enum vry_finish_flag { KEEP, DISCARD };
void VRY_Finish(struct req *req, enum vry_finish_flag);

/* cache_vcl.c */
void VCL_Bo2Ctx(struct vrt_ctx *, struct busyobj *);
void VCL_Req2Ctx(struct vrt_ctx *, struct req *);
struct vrt_ctx *VCL_Get_CliCtx(int);
struct vsb *VCL_Rel_CliCtx(struct vrt_ctx **);
void VCL_Panic(struct vsb *, const char *nm, const struct vcl *);
void VCL_Poll(void);
void VCL_Init(void);

#define VCL_MET_MAC(l,u,t,b) \
    void VCL_##l##_method(struct vcl *, struct worker *, struct req *, \
        struct busyobj *bo, void *specific);
#include "tbl/vcl_returns.h"
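
/*
 * VCL_MET_MAC() above generates one VCL_<l>_method() prototype per VCL
 * method listed in tbl/vcl_returns.h (a "recv" row would yield
 * VCL_recv_method(), for example); only the first macro argument is
 * used in this header.
 */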


typedef int vcl_be_func(struct cli *, struct director *, void *);

int VCL_IterDirector(struct cli *, const char *, vcl_be_func *, void *);

/* cache_vrt.c */
void pan_privs(struct vsb *, const struct vrt_privs *);

/* cache_vrt_filter.c */
int VCL_StackVFP(struct vfp_ctx *, const struct vcl *, const char *);
int VCL_StackVDP(struct vdp_ctx *vdc, const struct vcl *vcl, const char *fl,
    struct req *req, struct busyobj *bo);
const char *resp_Get_Filter_List(struct req *req);
void VCL_VRT_Init(void);

/* cache_vrt_vcl.c */
const char *VCL_Return_Name(unsigned);
const char *VCL_Method_Name(unsigned);
void VCL_Refresh(struct vcl **);
void VCL_Recache(const struct worker *, struct vcl **);
void VCL_Ref(struct vcl *);
void VCL_Rel(struct vcl **);
VCL_BACKEND VCL_DefaultDirector(const struct vcl *);
const struct vrt_backend_probe *VCL_DefaultProbe(const struct vcl *);

/* cache_vrt_priv.c */
extern struct vrt_privs cli_task_privs[1];
void VCL_TaskEnter(struct vrt_privs *);
void VCL_TaskLeave(VRT_CTX, struct vrt_privs *);

/* cache_vrt_vmod.c */
void VMOD_Init(void);
void VMOD_Panic(struct vsb *);

#if defined(ENABLE_COVERAGE) || defined(ENABLE_SANITIZER)
#  define DONT_DLCLOSE_VMODS
#endif

/* cache_wrk.c */
void WRK_Init(void);
void WRK_AddStat(const struct worker *);
void WRK_Log(enum VSL_tag_e, const char *, ...);

/* cache_vpi.c */
extern const size_t vpi_wrk_len;
void VPI_wrk_init(struct worker *, void *, size_t);
void VPI_Panic(struct vsb *, const struct wrk_vpi *, const struct vcl *);

/* cache_ws.c */
void WS_Panic(struct vsb *, const struct ws *);
static inline int
WS_IsReserved(const struct ws *ws)
{

    return (ws->r != NULL);
}

void *WS_AtOffset(const struct ws *ws, unsigned off, unsigned len);
unsigned WS_ReservationOffset(const struct ws *ws);
int WS_Pipeline(struct ws *, const void *b, const void *e, unsigned rollback);

/* cache_ws_common.c */
void WS_Id(const struct ws *ws, char *id);
void WS_Rollback(struct ws *, uintptr_t);

/* http1/cache_http1_pipe.c */
void V1P_Init(void);

/* cache_http2_deliver.c */
void V2D_Init(void);

/* stevedore.c */
void STV_open(void);
void STV_close(void);
const struct stevedore *STV_next(void);
int STV_BanInfoDrop(const uint8_t *ban, unsigned len);
int STV_BanInfoNew(const uint8_t *ban, unsigned len);
void STV_BanExport(const uint8_t *banlist, unsigned len);
// STV_NewObject() len is space for OBJ_VARATTR
int STV_NewObject(struct worker *, struct objcore *,
    const struct stevedore *, unsigned len);

struct stv_buffer;
struct stv_buffer *STV_AllocBuf(struct worker *wrk, const struct stevedore *stv,
    size_t size);
void STV_FreeBuf(struct worker *wrk, struct stv_buffer **pstvbuf);
void *STV_GetBufPtr(struct stv_buffer *stvbuf, size_t *psize);

#if WITH_PERSISTENT_STORAGE
/* storage_persistent.c */
void SMP_Ready(void);
#endif

#define FEATURE(x)      COM_FEATURE(cache_param->feature_bits, x)
#define EXPERIMENT(x)   COM_EXPERIMENT(cache_param->experimental_bits, x)
#define DO_DEBUG(x)     COM_DO_DEBUG(cache_param->debug_bits, x)

#define DSL(debug_bit, id, ...) \
    do { \
        if (DO_DEBUG(debug_bit)) \
            VSL(SLT_Debug, (id), __VA_ARGS__); \
    } while (0)

#define DSLb(debug_bit, ...) \
    do { \
        if (DO_DEBUG(debug_bit)) \
            WRK_Log(SLT_Debug, __VA_ARGS__); \
    } while (0)
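
/*
 * Usage sketch for the two debug-logging macros above (illustrative;
 * DBG_SOMEBIT and the variables are placeholders): DSL() attributes the
 * record to a given id via VSL(), DSLb() logs through the current worker
 * via WRK_Log().
 *
 *	DSL(DBG_SOMEBIT, vxid, "value=%d", value);
 *	DSLb(DBG_SOMEBIT, "value=%d", value);
 */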