/* varnish-cache/bin/varnishd/cache/cache_varnishd.h */

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Stuff that should *never* be exposed to a VMOD
 */

#include "cache.h" |
| 34 |
|
|
| 35 |
|
#include "vsb.h" |
| 36 |
|
|
| 37 |
|
#include <sys/socket.h> |
| 38 |
|
|
| 39 |
|
#include <string.h> |
| 40 |
|
#include <limits.h> |
| 41 |
|
#include <unistd.h> |
| 42 |
|
|
| 43 |
|
#include "common/common_param.h" |
| 44 |
|
|
| 45 |
|
#ifdef NOT_IN_A_VMOD |
| 46 |
|
# include "VSC_main.h" |
| 47 |
|
#endif |
| 48 |
|
|
| 49 |
|
/*--------------------------------------------------------------------*/ |
| 50 |
|
|
| 51 |
|
struct vfp; |
| 52 |
|
struct vdp; |
| 53 |
|
struct cli_proto; |
| 54 |
|
struct poolparam; |
| 55 |
|
|
| 56 |
|
/*--------------------------------------------------------------------*/ |
| 57 |
|
|
| 58 |
|
typedef enum req_fsm_nxt req_state_f(struct worker *, struct req *); |
| 59 |
|
struct req_step { |
| 60 |
|
const char *name; |
| 61 |
|
req_state_f *func; |
| 62 |
|
}; |
| 63 |
|
|
| 64 |
|
extern const struct req_step R_STP_TRANSPORT[1]; |
| 65 |
|
extern const struct req_step R_STP_RECV[1]; |
| 66 |
|
|
| 67 |
|
struct vxid_pool { |
| 68 |
|
uint64_t next; |
| 69 |
|
uint32_t count; |
| 70 |
|
}; |
| 71 |
|
|
| 72 |
|
/*-------------------------------------------------------------------- |
| 73 |
|
* Private part of worker threads |
| 74 |
|
*/ |
| 75 |
|
|
| 76 |
|
struct worker_priv { |
| 77 |
|
unsigned magic; |
| 78 |
|
#define WORKER_PRIV_MAGIC 0x3047db99 |
| 79 |
|
struct objhead *nobjhead; |
| 80 |
|
struct objcore *nobjcore; |
| 81 |
|
void *nhashpriv; |
| 82 |
|
struct vxid_pool vxid_pool[1]; |
| 83 |
|
struct vcl *vcl; |
| 84 |
|
}; |
| 85 |
|
|
| 86 |
|
/*--------------------------------------------------------------------
 * HTTP Protocol connection structure
 *
 * This is the protocol independent object for an HTTP connection, used
 * both for backend and client sides.
 *
 */

struct http_conn {
	unsigned		magic;
#define HTTP_CONN_MAGIC		0x3e19edd1

	int			*rfd;
	stream_close_t		doclose;
	body_status_t		body_status;
	struct ws		*ws;
	char			*rxbuf_b;
	char			*rxbuf_e;
	char			*pipeline_b;
	char			*pipeline_e;
	ssize_t			content_length;
	void			*priv;

	/* Timeouts */
	vtim_dur		first_byte_timeout;
	vtim_dur		between_bytes_timeout;
};

enum htc_status_e {
#define HTC_STATUS(e, n, s, l)	HTC_S_ ## e = n,
#include "tbl/htc.h"
};

typedef enum htc_status_e htc_complete_f(struct http_conn *);
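
/*
 * Usage sketch (illustrative; variable names are hypothetical): a transport
 * typically prepares the receive side with HTC_RxInit() and then drives
 * HTC_RxStuff() with a completion callback such as HTTP1_Complete (both
 * declared further down in this file).  rxbuf_b..rxbuf_e then delimit the
 * bytes received for the current header block, while pipeline_b..pipeline_e
 * hold any surplus bytes already read:
 *
 *	HTC_RxInit(htc, req->ws);
 *	hs = HTC_RxStuff(htc, HTTP1_Complete, &t1, &t2,
 *	    ti, tn, td, maxbytes);
 *	if (hs == HTC_S_COMPLETE)	// status value from tbl/htc.h
 *		status = HTTP1_DissectRequest(htc, req->http);
 */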

/* -------------------------------------------------------------------*/

extern volatile struct params * cache_param;

/* -------------------------------------------------------------------
 * The VCF facility is deliberately undocumented, use at your peril.
 */

struct vcf_return {
	const char	*name;
};

#define VCF_RETURNS() \
	VCF_RETURN(CONTINUE) \
	VCF_RETURN(DEFAULT) \
	VCF_RETURN(MISS) \
	VCF_RETURN(HIT)

#define VCF_RETURN(x) extern const struct vcf_return VCF_##x[1];
VCF_RETURNS()
#undef VCF_RETURN
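
/*
 * For reference: with the VCF_RETURN() definition above, VCF_RETURNS()
 * expands to
 *
 *	extern const struct vcf_return VCF_CONTINUE[1];
 *	extern const struct vcf_return VCF_DEFAULT[1];
 *	extern const struct vcf_return VCF_MISS[1];
 *	extern const struct vcf_return VCF_HIT[1];
 */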

typedef const struct vcf_return *vcf_func_f(
	struct req *req,
	struct objcore **oc,
	struct objcore **oc_exp,
	int state);

struct vcf {
	unsigned	magic;
#define VCF_MAGIC	0x183285d1
	vcf_func_f	*func;
	void		*priv;
};

/* Prototypes etc ----------------------------------------------------*/

/* cache_backend.c */
struct backend;

/* cache_backend_cfg.c */
void VBE_InitCfg(void);

/* cache_ban.c */

/* for stevedores resurrecting bans */
void BAN_Hold(void);
void BAN_Release(void);
void BAN_Reload(const uint8_t *ban, unsigned len);
struct ban *BAN_FindBan(vtim_real t0);
void BAN_RefBan(struct objcore *oc, struct ban *);
vtim_real BAN_Time(const struct ban *ban);

/* cache_busyobj.c */
struct busyobj *VBO_GetBusyObj(const struct worker *, const struct req *);
void VBO_ReleaseBusyObj(struct worker *wrk, struct busyobj **busyobj);
void VBO_SetState(struct worker *wrk, struct busyobj *bo,
    enum boc_state_e next);

/* cache_director.c */
int VDI_GetHdr(struct busyobj *);
VCL_IP VDI_GetIP(struct busyobj *);
void VDI_Finish(struct busyobj *bo);
stream_close_t VDI_Http1Pipe(struct req *, struct busyobj *);
void VDI_Panic(const struct director *, struct vsb *, const char *nm);
void VDI_Event(const struct director *d, enum vcl_event_e ev);
void VDI_Init(void);

/* cache_deliver_proc.c */
void VDP_Fini(const struct vdp_ctx *vdc);
void VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl,
    const struct req *req, const struct busyobj *bo, intmax_t *cl);
uint64_t VDP_Close(struct vdp_ctx *, struct objcore *, struct boc *);
void VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc);
int VDP_Push(VRT_CTX, struct vdp_ctx *, struct ws *, const struct vdp *,
    void *priv);
int VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len);
int VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc);
extern const struct vdp VDP_gunzip;
extern const struct vdp VDP_esi;
extern const struct vdp VDP_range;

uint64_t VDPIO_Close(struct vdp_ctx *, struct objcore *, struct boc *);
int VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc);
int VDPIO_Push(VRT_CTX, struct vdp_ctx *, struct ws *, const struct vdp *,
    void *priv);


/* cache_exp.c */
vtim_real EXP_Ttl(const struct req *, const struct objcore *);
vtim_real EXP_Ttl_grace(const struct req *, const struct objcore *oc);
void EXP_RefNewObjcore(struct objcore *);
void EXP_Insert(struct worker *wrk, struct objcore *oc);
void EXP_Remove(struct objcore *, const struct objcore *);

#define EXP_Dttl(req, oc) (oc->ttl - (req->t_req - oc->t_origin))

/* cache_expire.c */

/*
 * The set of variables which control object expiry are inconveniently
 * 24 bytes long (double+3*float) and this causes alignment waste if
 * we put them in a struct.
 * These three macros operate on the struct we don't use.
 */

#define EXP_ZERO(xx) \
	do { \
		(xx)->t_origin = 0.0; \
		(xx)->ttl = 0.0; \
		(xx)->grace = 0.0; \
		(xx)->keep = 0.0; \
	} while (0)

#define EXP_COPY(to,fm) \
	do { \
		(to)->t_origin = (fm)->t_origin; \
		(to)->ttl = (fm)->ttl; \
		(to)->grace = (fm)->grace; \
		(to)->keep = (fm)->keep; \
	} while (0)

#define EXP_WHEN(to) \
	((to)->t_origin + (to)->ttl + (to)->grace + (to)->keep)
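
/*
 * Usage sketch (illustrative; "oc", "src" and "now" are hypothetical
 * variables): the macros work on any struct carrying the
 * t_origin/ttl/grace/keep members, e.g.
 *
 *	EXP_COPY(oc, src);
 *	if (EXP_WHEN(oc) < now)
 *		... the object is past t_origin + ttl + grace + keep ...
 */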

/* cache_exp.c */
void EXP_Rearm(struct objcore *oc, vtim_real now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep);
void EXP_Reduce(struct objcore *oc, vtim_real now,
    vtim_dur ttl, vtim_dur grace, vtim_dur keep);

/* From cache_main.c */
void BAN_Init(void);
void BAN_Compile(void);
void BAN_Shutdown(void);

/* From cache_hash.c */
void BAN_NewObjCore(struct objcore *oc);
void BAN_DestroyObj(struct objcore *oc);
int BAN_CheckObject(struct worker *, struct objcore *, struct req *);

/* cache_busyobj.c */
void VBO_Init(void);

/* cache_cli.c [CLI] */
void CLI_Init(void);
void CLI_Run(void);
void CLI_AddFuncs(struct cli_proto *p);

/* cache_expire.c */
void EXP_Init(void);
void EXP_Shutdown(void);

/* cache_fetch.c */
enum vbf_fetch_mode_e {
	VBF_NORMAL = 0,
	VBF_PASS = 1,
	VBF_BACKGROUND = 2,
};
void VBF_Fetch(struct worker *wrk, struct req *req,
    struct objcore *oc, struct objcore *oldoc, enum vbf_fetch_mode_e);
const char *VBF_Get_Filter_List(struct busyobj *);
void Bereq_Rollback(VRT_CTX);

/* cache_fetch_proc.c */
void VFP_Init(void);
struct vfp_entry *VFP_Push(struct vfp_ctx *, const struct vfp *);
enum vfp_status VFP_GetStorage(struct vfp_ctx *, ssize_t *sz, uint8_t **ptr);
void VFP_Extend(const struct vfp_ctx *, ssize_t sz, enum vfp_status);
void VFP_Setup(struct vfp_ctx *vc, struct worker *wrk);
int VFP_Open(VRT_CTX, struct vfp_ctx *);
uint64_t VFP_Close(struct vfp_ctx *);

extern const struct vfp VFP_gunzip;
extern const struct vfp VFP_gzip;
extern const struct vfp VFP_testgunzip;
extern const struct vfp VFP_esi;
extern const struct vfp VFP_esi_gzip;

/* cache_http.c */
void HTTP_Init(void);

/* cache_http1_proto.c */

htc_complete_f HTTP1_Complete;
uint16_t HTTP1_DissectRequest(struct http_conn *, struct http *);
uint16_t HTTP1_DissectResponse(struct http_conn *, struct http *resp,
    const struct http *req);
struct v1l;
unsigned HTTP1_Write(struct v1l *v1l, const struct http *hp, const int*);

/* cache_main.c */
vxid_t VXID_Get(const struct worker *, uint64_t marker);
extern pthread_key_t panic_key;
extern pthread_key_t witness_key;

void THR_SetName(const char *name);
const char* THR_GetName(void);
void THR_SetBusyobj(const struct busyobj *);
struct busyobj * THR_GetBusyobj(void);
void THR_SetRequest(const struct req *);
struct req * THR_GetRequest(void);
void THR_SetWorker(const struct worker *);
struct worker * THR_GetWorker(void);
void THR_Init(void);

/* cache_lck.c */
void LCK_Init(void);

/* cache_mempool.c */
void MPL_AssertSane(const void *item);
struct mempool * MPL_New(const char *name, volatile struct poolparam *pp,
    volatile unsigned *cur_size);
void MPL_Destroy(struct mempool **mpp);
void *MPL_Get(struct mempool *mpl, unsigned *size);
void MPL_Free(struct mempool *mpl, void *item);

/* cache_obj.c */
void ObjInit(void);
struct objcore * ObjNew(const struct worker *);
void ObjDestroy(const struct worker *, struct objcore **);
int ObjGetSpace(struct worker *, struct objcore *, ssize_t *sz, uint8_t **ptr);
void ObjExtend(struct worker *, struct objcore *, ssize_t l, int final);
uint64_t ObjWaitExtend(const struct worker *, const struct objcore *,
    uint64_t l, enum boc_state_e *statep);
void ObjSetState(struct worker *, struct objcore *, enum boc_state_e next,
    unsigned broadcast);
void ObjWaitState(const struct objcore *, enum boc_state_e want);
void ObjTouch(struct worker *, struct objcore *, vtim_real now);
void ObjFreeObj(struct worker *, struct objcore *);
void ObjSlim(struct worker *, struct objcore *);
void *ObjSetAttr(struct worker *, struct objcore *, enum obj_attr,
    ssize_t len, const void *);
int ObjCopyAttr(struct worker *, struct objcore *, struct objcore *,
    enum obj_attr attr);
void ObjBocDone(struct worker *, struct objcore *, struct boc **);
// VAI
uint64_t ObjVAIGetExtend(struct worker *, const struct objcore *, uint64_t,
    enum boc_state_e *, struct vai_qe *);
void ObjVAICancel(struct worker *, struct boc *, struct vai_qe *);

int ObjSetDouble(struct worker *, struct objcore *, enum obj_attr, double);
int ObjSetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t);
int ObjSetXID(struct worker *, struct objcore *, vxid_t);

void ObjSetFlag(struct worker *, struct objcore *, enum obj_flags of, int val);

void ObjSendEvent(struct worker *, struct objcore *oc, unsigned event);

#define OEV_INSERT	(1U<<1)
#define OEV_BANCHG	(1U<<2)
#define OEV_TTLCHG	(1U<<3)
#define OEV_EXPIRE	(1U<<4)

#define OEV_MASK (OEV_INSERT|OEV_BANCHG|OEV_TTLCHG|OEV_EXPIRE)

typedef void obj_event_f(struct worker *, void *priv, struct objcore *,
    unsigned);

uintptr_t ObjSubscribeEvents(obj_event_f *, void *, unsigned mask);
void ObjUnsubscribeEvents(uintptr_t *);

/* cache_panic.c */
void PAN_Init(void);
int PAN__DumpStruct(struct vsb *vsb, int block, int track, const void *ptr,
    const char *smagic, unsigned magic, const char *fmt, ...)
    v_printflike_(7,8);

#define PAN_CheckMagic(vsb, ptr, exp) \
	do { \
		if ((ptr)->magic != (exp)) \
			VSB_printf((vsb), \
			    "MAGIC at %p is 0x%08x (Should be: %s/0x%08x)\n", \
			    ptr, (ptr)->magic, #exp, exp); \
	} while(0)

#define PAN_dump_struct(vsb, ptr, magic, ...) \
	PAN__DumpStruct(vsb, 1, 1, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_oneline(vsb, ptr, magic, ...) \
	PAN__DumpStruct(vsb, 0, 1, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_once(vsb, ptr, magic, ...) \
	PAN__DumpStruct(vsb, 1, 0, ptr, #magic, magic, __VA_ARGS__)

#define PAN_dump_once_oneline(vsb, ptr, magic, ...) \
	PAN__DumpStruct(vsb, 0, 0, ptr, #magic, magic, __VA_ARGS__)
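
/*
 * Usage sketch (illustrative; the foo_pan() function, its types and magics
 * are hypothetical): a per-subsystem panic dumper typically opens its struct
 * with PAN_dump_struct(), sanity-checks nested objects with PAN_CheckMagic(),
 * and then closes the block it opened:
 *
 *	static void
 *	foo_pan(struct vsb *vsb, const struct foo *foo)
 *	{
 *		PAN_dump_struct(vsb, foo, FOO_MAGIC, "foo");
 *		PAN_CheckMagic(vsb, foo->bar, BAR_MAGIC);
 *		VSB_indent(vsb, -2);
 *		VSB_cat(vsb, "},\n");
 *	}
 */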

/* cache_pool.c */
void Pool_Init(void);
int Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio prio);
int Pool_Task_Arg(struct worker *, enum task_prio, task_func_t *,
    const void *arg, size_t arg_len);
void Pool_Sumstat(const struct worker *w);
int Pool_TrySumstat(const struct worker *wrk);
void Pool_PurgeStat(unsigned nobj);
int Pool_Task_Any(struct pool_task *task, enum task_prio prio);
void pan_pool(struct vsb *);

/* cache_range.c */
int VRG_CheckBo(struct busyobj *);

/* cache_req.c */
struct req *Req_New(struct sess *, const struct req *);
void Req_Release(struct req *);
void Req_Rollback(VRT_CTX);
void Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req);
void Req_Fail(struct req *req, stream_close_t reason);
void Req_AcctLogCharge(struct VSC_main_wrk *, struct req *);
void Req_LogHit(struct worker *, struct req *, struct objcore *, intmax_t);
const char *Req_LogStart(const struct worker *, struct req *);

/* cache_req_body.c */
int VRB_Ignore(struct req *);
ssize_t VRB_Cache(struct req *, ssize_t maxsize);
void VRB_Free(struct req *);

/* cache_req_fsm.c [CNT] */

int Resp_Setup_Deliver(struct req *);
void Resp_Setup_Synth(struct req *);

enum req_fsm_nxt {
	REQ_FSM_MORE,
	REQ_FSM_DONE,
	REQ_FSM_DISEMBARK,
};

void CNT_Embark(struct worker *, struct req *);
enum req_fsm_nxt CNT_Request(struct req *);

/* cache_session.c */
void SES_NewPool(struct pool *, unsigned pool_no);
void SES_DestroyPool(struct pool *);
void SES_Wait(struct sess *, const struct transport *);
void SES_Ref(struct sess *sp);
void SES_Rel(struct sess *sp);

void HTC_Status(enum htc_status_e, const char **, const char **);
void HTC_RxInit(struct http_conn *htc, struct ws *ws);
void HTC_RxPipeline(struct http_conn *htc, char *);
enum htc_status_e HTC_RxStuff(struct http_conn *, htc_complete_f *,
    vtim_real *t1, vtim_real *t2, vtim_real ti, vtim_real tn, vtim_dur td,
    int maxbytes);

#define SESS_ATTR(UP, low, typ, len) \
	int SES_Set_##low(const struct sess *sp, const typ *src); \
	int SES_Reserve_##low(struct sess *sp, typ **dst, ssize_t *sz);
#include "tbl/sess_attr.h"
int SES_Set_String_Attr(struct sess *sp, enum sess_attr a, const char *src);
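
/*
 * For reference: each SESS_ATTR(UP, low, typ, len) entry in
 * "tbl/sess_attr.h" generates a pair of prototypes; for a hypothetical
 * attribute "foo" of type "struct suckaddr" that would be
 *
 *	int SES_Set_foo(const struct sess *sp, const struct suckaddr *src);
 *	int SES_Reserve_foo(struct sess *sp, struct suckaddr **dst,
 *	    ssize_t *sz);
 */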

/* cache_shmlog.c */
extern struct VSC_main *VSC_C_main;
void VSM_Init(void);
void VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len);
void VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why,
    vxid_t vxid);
void VSL_End(struct vsl_log *vsl);
void VSL_Flush(struct vsl_log *, int overflow);

/* cache_conn_pool.c */
struct conn_pool;
void VCP_Init(void);
void VCP_Panic(struct vsb *, struct conn_pool *);
void VCP_RelPoll(void);

/* cache_backend_probe.c */
void VBP_Init(void);

/* cache_vary.c */
int VRY_Create(struct busyobj *bo, struct vsb **psb);
int VRY_Match(const struct req *, const uint8_t *vary);
void VRY_Prep(struct req *);
void VRY_Clear(struct req *);
enum vry_finish_flag { KEEP, DISCARD };
void VRY_Finish(struct req *req, enum vry_finish_flag);

/* cache_vcl.c */
void VCL_Bo2Ctx(struct vrt_ctx *, struct busyobj *);
void VCL_Req2Ctx(struct vrt_ctx *, struct req *);
struct vrt_ctx *VCL_Get_CliCtx(int);
struct vsb *VCL_Rel_CliCtx(struct vrt_ctx **);
void VCL_Panic(struct vsb *, const char *nm, const struct vcl *);
void VCL_Poll(void);
void VCL_Init(void);
void VCL_Shutdown(void);

#define VCL_MET_MAC(l,u,t,b) \
	void VCL_##l##_method(struct vcl *, struct worker *, struct req *, \
	    struct busyobj *bo, void *specific);
#include "tbl/vcl_returns.h"


typedef int vcl_be_func(struct cli *, struct director *, void *);

int VCL_IterDirector(struct cli *, const char *, vcl_be_func *, void *);

/* cache_vrt.c */
void pan_privs(struct vsb *, const struct vrt_privs *);

/* cache_vrt_filter.c */
int VCL_StackVFP(struct vfp_ctx *, const struct vcl *, const char *);
int VCL_StackVDP(struct vdp_ctx *vdc, const struct vcl *vcl, const char *fl,
    struct req *req, struct busyobj *bo);
const char *resp_Get_Filter_List(struct req *req);
void VCL_VRT_Init(void);

/* cache_vrt_vcl.c */
const char *VCL_Return_Name(unsigned);
const char *VCL_Method_Name(unsigned);
void VCL_Refresh(struct vcl **);
void VCL_Recache(const struct worker *, struct vcl **);
void VCL_Ref(struct vcl *);
void VCL_Rel(struct vcl **);
VCL_BACKEND VCL_DefaultDirector(const struct vcl *);
const struct vrt_backend_probe *VCL_DefaultProbe(const struct vcl *);

/* cache_vrt_priv.c */
extern struct vrt_privs cli_task_privs[1];
void VCL_TaskEnter(struct vrt_privs *);
void VCL_TaskLeave(VRT_CTX, struct vrt_privs *);

/* cache_vrt_vmod.c */
void VMOD_Init(void);
void VMOD_Panic(struct vsb *);

#if defined(ENABLE_COVERAGE) || defined(ENABLE_SANITIZER)
# define DONT_DLCLOSE_VMODS
#endif

/* cache_wrk.c */
void WRK_Init(void);
void WRK_AddStat(const struct worker *);
void WRK_Log(enum VSL_tag_e, const char *, ...);

/* cache_vpi.c */
extern const size_t vpi_wrk_len;
void VPI_wrk_init(struct worker *, void *, size_t);
void VPI_Panic(struct vsb *, const struct wrk_vpi *, const struct vcl *);

/* cache_ws.c */
void WS_Panic(struct vsb *, const struct ws *);
static inline int
WS_IsReserved(const struct ws *ws)
{

	return (ws->r != NULL);
}

void *WS_AtOffset(const struct ws *ws, unsigned off, unsigned len);
unsigned WS_ReservationOffset(const struct ws *ws);
int WS_Pipeline(struct ws *, const void *b, const void *e, unsigned rollback);

/* cache_ws_common.c */
void WS_Id(const struct ws *ws, char *id);
void WS_Rollback(struct ws *, uintptr_t);

/* http1/cache_http1_pipe.c */
void V1P_Init(void);

/* cache_http2_deliver.c */
void V2D_Init(void);

/* stevedore.c */
void STV_open(void);
void STV_warn(void);
void STV_close(void);
const struct stevedore *STV_next(void);
int STV_BanInfoDrop(const uint8_t *ban, unsigned len);
int STV_BanInfoNew(const uint8_t *ban, unsigned len);
void STV_BanExport(const uint8_t *banlist, unsigned len);
// STV_NewObject() len is space for OBJ_VARATTR
int STV_NewObject(struct worker *, struct objcore *,
    const struct stevedore *, unsigned len);

struct stv_buffer;
struct stv_buffer *STV_AllocBuf(struct worker *wrk, const struct stevedore *stv,
    size_t size);
void STV_FreeBuf(struct worker *wrk, struct stv_buffer **pstvbuf);
void *STV_GetBufPtr(struct stv_buffer *stvbuf, size_t *psize);

#ifdef WITH_PERSISTENT_STORAGE
/* storage_persistent.c */
void SMP_Ready(void);
#endif

#define FEATURE(x)	COM_FEATURE(cache_param->feature_bits, x)
#define EXPERIMENT(x)	COM_EXPERIMENT(cache_param->experimental_bits, x)
#define DO_DEBUG(x)	COM_DO_DEBUG(cache_param->debug_bits, x)

#define DSL(debug_bit, id, ...) \
	do { \
		if (DO_DEBUG(debug_bit)) \
			VSL(SLT_Debug, (id), __VA_ARGS__); \
	} while (0)

#define DSLb(debug_bit, ...) \
	do { \
		if (DO_DEBUG(debug_bit)) \
			WRK_Log(SLT_Debug, __VA_ARGS__); \
	} while (0)
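
/*
 * Usage sketch (illustrative; the debug bit, vxid expression and message are
 * hypothetical): both macros emit an SLT_Debug record only when the given
 * debug bit is set in cache_param->debug_bits, DSL() against an explicit
 * vxid and DSLb() through the current worker's log:
 *
 *	DSL(DBG_WAITINGLIST, req->vsl->wid, "on waiting list <%p>", oh);
 *	DSLb(DBG_WAITINGLIST, "off waiting list");
 */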