varnish-cache/bin/varnishd/cache/cache_busyobj.c
/*-
 * Copyright (c) 2013-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Handle backend connections and backend request structures.
 *
 */

#include "config.h"
#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"

static struct mempool           *vbopool;
/*--------------------------------------------------------------------
 */
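/*
 * Create the mempool that backs all busyobj allocations; pool behaviour
 * and item sizing follow the pool_vbo and workspace_backend parameters
 * passed to MPL_New() below.
 */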
void
VBO_Init(void)
{

        vbopool = MPL_New("busyobj", &cache_param->pool_vbo,
            &cache_param->workspace_backend);
        AN(vbopool);
}

/*--------------------------------------------------------------------
 * BusyObj handling
 */
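/*
 * Allocate and initialize a busyobj for a backend fetch.
 *
 * A sketch of the layout carved out of a single mempool item, as derived
 * from the code below (not an authoritative map of struct busyobj):
 *
 *   [struct busyobj][bereq0][bereq][beresp][VSL buffer][vfc][bo->ws ...]
 *
 * Each region is pointer-rounded with PRNDUP() and asserted to fit
 * before bo->end; whatever remains at the tail becomes the backend
 * workspace bo->ws.
 */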
struct busyobj *
VBO_GetBusyObj(const struct worker *wrk, const struct req *req)
{
        struct busyobj *bo;
        uint16_t nhttp;
        unsigned sz;
        char *p;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        bo = MPL_Get(vbopool, &sz);
        XXXAN(bo);
        bo->magic = BUSYOBJ_MAGIC;
        bo->end = (char *)bo + sz;
        bo->max_retries = cache_param->max_retries;
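        /*
         * Carve the rest of the mempool item: three HTTP header work
         * areas (bereq0, bereq, beresp), the VSL buffer and the VFP
         * context, in that order.
         */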
        p = (void*)(bo + 1);
        p = (void*)PRNDUP(p);
        assert(p < bo->end);

        nhttp = (uint16_t)cache_param->http_max_hdr;
        sz = HTTP_estimate(nhttp);

        bo->bereq0 = HTTP_create(p, nhttp, sz);
        p += sz;
        p = (void*)PRNDUP(p);
        assert(p < bo->end);

        bo->bereq = HTTP_create(p, nhttp, sz);
        p += sz;
        p = (void*)PRNDUP(p);
        assert(p < bo->end);

        bo->beresp = HTTP_create(p, nhttp, sz);
        p += sz;
        p = (void*)PRNDUP(p);
        assert(p < bo->end);

        sz = cache_param->vsl_buffer;
        VSL_Setup(bo->vsl, p, sz);
        bo->vsl->wid = VXID_Get(wrk, VSL_BACKENDMARKER);
        p += sz;
        p = (void*)PRNDUP(p);
        assert(p < bo->end);

        bo->vfc = (void*)p;
        p += sizeof (*bo->vfc);
        p = (void*)PRNDUP(p);
        INIT_OBJ(bo->vfc, VFP_CTX_MAGIC);
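        /* Whatever is left between p and bo->end becomes this fetch's
         * backend workspace. */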
        WS_Init(bo->ws, "bo", p, bo->end - p);

        bo->do_stream = 1;

        if (req->client_identity != NULL) {
                bo->client_identity = WS_Copy(bo->ws, req->client_identity, -1);
                XXXAN(bo->client_identity);
        }

        VRT_Assign_Backend(&bo->director_req, req->director_hint);
        bo->vcl = req->vcl;
        VCL_Ref(bo->vcl);

        bo->t_first = bo->t_prev = NAN;
        bo->connect_timeout = NAN;
        bo->first_byte_timeout = NAN;
        bo->between_bytes_timeout = NAN;

        memcpy(bo->digest, req->digest, sizeof bo->digest);

        return (bo);
}
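/*
 * Illustrative sketch (an assumption, not the actual call sites, which
 * live in the fetch code elsewhere) of how a caller is expected to pair
 * these functions, with oc standing for the objcore being fetched into:
 *
 *      bo = VBO_GetBusyObj(wrk, req);
 *      bo->fetch_objcore = oc;         // released by VBO_ReleaseBusyObj()
 *      bo->req = req;
 *      // ... drive the fetch ...
 *      VBO_SetState(wrk, bo, BOS_REQ_DONE);
 *      VBO_SetState(wrk, bo, BOS_STREAM);      // if bo->do_stream
 *      VBO_SetState(wrk, bo, BOS_FINISHED);    // or BOS_FAILED on error
 *      VBO_ReleaseBusyObj(wrk, &bo);
 */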
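/*
 * Tear down a busyobj: log the final BereqAcct record, close its VSL
 * transaction, count workspace overflows, drop the objcore, director and
 * VCL references it still holds, and return the memory to the pool.
 */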
void
VBO_ReleaseBusyObj(struct worker *wrk, struct busyobj **pbo)
{
        struct busyobj *bo;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(bo, pbo, BUSYOBJ_MAGIC);
        CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);

        AZ(bo->htc);
        AZ(bo->stale_oc);
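        /* Final accounting: bereq header, body and total bytes, then
         * beresp header, body and total bytes. */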
        VSLb(bo->vsl, SLT_BereqAcct, "%ju %ju %ju %ju %ju %ju",
            (uintmax_t)bo->acct.bereq_hdrbytes,
            (uintmax_t)bo->acct.bereq_bodybytes,
            (uintmax_t)(bo->acct.bereq_hdrbytes + bo->acct.bereq_bodybytes),
            (uintmax_t)bo->acct.beresp_hdrbytes,
            (uintmax_t)bo->acct.beresp_bodybytes,
            (uintmax_t)(bo->acct.beresp_hdrbytes + bo->acct.beresp_bodybytes));

        VSL_End(bo->vsl);

        if (WS_Overflowed(bo->ws))
                wrk->stats->ws_backend_overflow++;

        if (bo->fetch_objcore != NULL) {
                (void)HSH_DerefObjCore(wrk, &bo->fetch_objcore);
        }

        VRT_Assign_Backend(&bo->director_req, NULL);
        VRT_Assign_Backend(&bo->director_resp, NULL);
        VCL_Rel(&bo->vcl);
#ifdef ENABLE_WORKSPACE_EMULATOR
        WS_Rollback(bo->ws, 0);
#endif

        MPL_Free(vbopool, bo);
}
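/*
 * Move the fetch to its next BOC state. "broadcast" decides whether
 * ObjSetState() should wake up parties waiting on this busyobj (for
 * instance requests streaming the body as it arrives).
 */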
void
VBO_SetState(struct worker *wrk, struct busyobj *bo, enum boc_state_e next)
{
        unsigned broadcast;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

        switch (next) {
        case BOS_REQ_DONE:
                AN(bo->req);
                bo->req = NULL;
                broadcast = bo->is_bgfetch;
                break;
        case BOS_STREAM:
                AN(bo->do_stream);
                AZ(bo->req);
                broadcast = 1;
                break;
        case BOS_FINISHED:
        case BOS_FAILED:
                /* We can't assert that either state already released its
                 * request because a fetch may fail before reaching the
                 * BOS_REQ_DONE state. Failing can also mean executing
                 * vcl_backend_error and reaching BOS_FINISHED from there.
                 * One can legitimately return(retry) from there and proceed
                 * again with a usable req if a return(error) transition led
                 * to vcl_backend_error instead of a failed fetch attempt.
                 */
                bo->req = NULL;
                broadcast = 1;
                break;
        default:
                WRONG("unexpected BOC state");
        }

        ObjSetState(wrk, bo->fetch_objcore, next, broadcast);
}