varnish-cache/bin/varnishd/cache/cache_deliver_proc.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"

void
VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc)
{
        struct vdp_entry *vde;

        if (PAN_dump_struct(vsb, vdc, VDP_CTX_MAGIC, "vdc"))
                return;
        VSB_printf(vsb, "nxt = %p,\n", vdc->nxt);
        VSB_printf(vsb, "retval = %d,\n", vdc->retval);

        if (!VTAILQ_EMPTY(&vdc->vdp)) {
                VSB_cat(vsb, "filters = {\n");
                VSB_indent(vsb, 2);
                VTAILQ_FOREACH(vde, &vdc->vdp, list)
                        VSB_printf(vsb, "%s = %p { priv = %p }\n",
                            vde->vdp->name, vde, vde->priv);
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "},\n");
        }

        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}

void
VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl,
    struct req *req)
{
        AN(vdc);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        AN(vsl);
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        INIT_OBJ(vdc, VDP_CTX_MAGIC);
        VTAILQ_INIT(&vdc->vdp);
        vdc->wrk = wrk;
        vdc->vsl = vsl;
        vdc->req = req;
}

/* VDP_bytes
 *
 * Pushes len bytes at ptr down the delivery processor list.
 *
 * This function picks and calls the next delivery processor from the
 * list. The return value is the return value of the delivery
 * processor. Upon seeing a non-zero return value, the lowest such value
 * observed is latched in ->retval and all subsequent calls to
 * VDP_bytes will return that value directly without calling the next
 * processor.
 *
 * VDP_END marks the end of successful processing; it is issued by
 * VDP_DeliverObj() and may also be sent downstream by processors ending
 * the stream early (in which case they return a value != 0).
 *
 * VDP_END must be received at most once per processor, so any VDP sending
 * it downstream must itself not forward it a second time.
 *
 * Valid return values (of VDP_bytes and any VDP function):
 * r < 0:  Error, breaks out early on an error condition
 * r == 0: Continue
 * r > 0:  Stop, breaks out early without error condition
 */

int
VDP_bytes(struct vdp_ctx *vdc, enum vdp_action act,
    const void *ptr, ssize_t len)
{
        int retval;
        struct vdp_entry *vdpe;

        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        if (vdc->retval)
                return (vdc->retval);
        vdpe = vdc->nxt;
        CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);

        /* at most one VDP_END call */
        assert(vdpe->end == VDP_NULL);

        if (act == VDP_NULL)
                assert(len > 0);
        else if (act == VDP_END)
                vdpe->end = VDP_END;
        else
                assert(act == VDP_FLUSH);

        /* Call the present layer, while pointing to the next layer down */
        vdc->nxt = VTAILQ_NEXT(vdpe, list);
        vdpe->calls++;
        vdc->bytes_done = len;
        retval = vdpe->vdp->bytes(vdc, act, &vdpe->priv, ptr, len);
        vdpe->bytes_in += vdc->bytes_done;
        if (retval && (vdc->retval == 0 || retval < vdc->retval))
                vdc->retval = retval; /* Latch error value */
        vdc->nxt = vdpe;
        return (vdc->retval);
}
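
/*
 * Illustrative sketch, not part of the original file: a minimal
 * pass-through "bytes" callback obeying the convention documented above
 * VDP_bytes().  It forwards everything it receives to the next processor
 * and returns that call's value unchanged, so errors (r < 0) and early
 * stops (r > 0) latch in ->retval as described.  The function name is
 * hypothetical; a real VDP would transform (ptr, len) before forwarding.
 */
static int v_matchproto_(vdp_bytes_f)
vdp_example_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        AN(priv);
        /* Hand the (possibly transformed) bytes to the next layer down. */
        return (VDP_bytes(vdc, act, ptr, len));
}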

int
VDP_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
        struct vdp_entry *vdpe;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        AN(ws);
        AN(vdp);
        AN(vdp->name);
        AN(vdp->bytes);

        if (vdc->retval)
                return (vdc->retval);

        if (DO_DEBUG(DBG_PROCESSORS))
                VSLb(vdc->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);

        vdpe = WS_Alloc(ws, sizeof *vdpe);
        if (vdpe == NULL) {
                AZ(vdc->retval);
                vdc->retval = -1;
                return (vdc->retval);
        }
        INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
        vdpe->vdp = vdp;
        vdpe->priv = priv;
        VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
        vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

        AZ(vdc->retval);
        if (vdpe->vdp->init == NULL)
                return (vdc->retval);

        vdc->retval = vdpe->vdp->init(ctx, vdc, &vdpe->priv,
            vdpe == vdc->nxt ? vdc->req->objcore : NULL);

        if (vdc->retval > 0) {
                VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
                vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
                vdc->retval = 0;
        }
        return (vdc->retval);
}

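/*
 * Illustrative sketch, not part of the original file: a filter table for
 * the hypothetical pass-through VDP sketched after VDP_bytes() above.
 * Only the members this file itself dereferences are set; .init and .fini
 * may be left NULL, as VDP_Push() and VDP_Close() check for that.
 * VDP_Push() allocates a vdp_entry for the table on the caller's
 * workspace, appends it to vdc->vdp and calls .init if present.
 */
static const struct vdp vdp_example = {
        .name =         "example",
        .bytes =        vdp_example_bytes,
};
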
uint64_t
VDP_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
        struct vdp_entry *vdpe;
        uint64_t rv = 0;

        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

        while (!VTAILQ_EMPTY(&vdc->vdp)) {
                vdpe = VTAILQ_FIRST(&vdc->vdp);
                rv = vdpe->bytes_in;
                VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
                    (uintmax_t)vdpe->calls, (uintmax_t)rv);
                if (vdc->retval >= 0)
                        AN(vdpe);
                if (vdpe != NULL) {
                        CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
                        if (vdpe->vdp->fini != NULL)
                                AZ(vdpe->vdp->fini(vdc, &vdpe->priv));
                        AZ(vdpe->priv);
                        VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
                }
                vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
#ifdef VDP_PEDANTIC_ARMED
                // enable when we are confident that we get VDP_END right
                if (vdc->nxt == NULL && vdc->retval >= 0)
                        assert(vdpe->end == VDP_END);
#endif
        }
        if (oc != NULL)
                HSH_Cancel(vdc->wrk, oc, boc);
        return (rv);
}

/*--------------------------------------------------------------------*/

static int v_matchproto_(objiterate_f)
vdp_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
        enum vdp_action act;

        if (flush == 0)
                act = VDP_NULL;
        else if ((flush & OBJ_ITER_END) != 0)
                act = VDP_END;
        else
                act = VDP_FLUSH;

        return (VDP_bytes(priv, act, ptr, len));
}

int
VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc)
{
        int r, final;

        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
        AN(vdc->vsl);
        vdc->req = NULL;
        final = oc->flags & (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP) ? 1 : 0;
        r = ObjIterate(vdc->wrk, oc, vdc, vdp_objiterate, final);
        if (r < 0)
                return (r);
        return (0);
}
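
/*
 * Illustrative sketch, not part of the original file: the order in which a
 * delivery path drives the functions above.  The function name, the
 * on-stack vdp_ctx and the single example filter pushed here are
 * assumptions for illustration; real callers manage their vdp_ctx and
 * filter list elsewhere in varnishd.
 */
static int
vdp_example_deliver(VRT_CTX, struct worker *wrk, struct req *req,
    struct objcore *oc, struct boc *boc)
{
        struct vdp_ctx vdc[1];
        uint64_t bytes;
        int r;

        VDP_Init(vdc, wrk, req->vsl, req);
        r = VDP_Push(ctx, vdc, req->ws, &vdp_example, NULL);
        if (r == 0)
                r = VDP_DeliverObj(vdc, oc);
        /* VDP_Close() unwinds the chain, logs VdpAcct records and reports
         * the byte count handed to the last VDP in the chain. */
        bytes = VDP_Close(vdc, oc, boc);
        (void)bytes;
        return (r);
}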