varnish-cache/vmod/vmod_debug_transport_vai.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * Copyright 2024 UPLEX - Nils Goroll Systemoptimierung
4
 * All rights reserved.
5
 *
6
 * Authors: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *          Nils Goroll <slink@uplex.de>
8
 *
9
 * SPDX-License-Identifier: BSD-2-Clause
10
 *
11
 * Redistribution and use in source and binary forms, with or without
12
 * modification, are permitted provided that the following conditions
13
 * are met:
14
 * 1. Redistributions of source code must retain the above copyright
15
 *    notice, this list of conditions and the following disclaimer.
16
 * 2. Redistributions in binary form must reproduce the above copyright
17
 *    notice, this list of conditions and the following disclaimer in the
18
 *    documentation and/or other materials provided with the distribution.
19
 *
20
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
24
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30
 * SUCH DAMAGE.
31
 */
32
33
#include "config.h"
34
35
#include "cache/cache_varnishd.h"
36
37
#include "cache/cache_filter.h"
38
#include "cache/cache_transport.h"
39
#include "http1/cache_http1.h"
40
41
#include "vmod_debug.h"
42
43
#define HELLO "hello "
44
45
static int v_matchproto_(vdpio_init_f)
46 64
vdpio_hello_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity)
47
{
48
49 64
        (void)ctx;
50 64
        (void)priv;
51
52 64
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
53 64
        AN(vdc->clen);
54
55 64
        if (*vdc->clen < 0)
56 18
                return (capacity);
57
58 46
        *vdc->clen += strlen(HELLO);
59 46
        http_Unset(vdc->hp, H_Content_Length);
60 46
        http_PrintfHeader(vdc->hp, "Content-Length: %jd", *vdc->clen);
61 46
        return (capacity);
62 64
}
63
64
/*
 * VDPIO lease for the "hello" debug filter: contribute the constant
 * "hello " prefix as one iovec, pull the rest of the data from the
 * layer below, then retire this filter from the chain.
 */
static int v_matchproto_(vdpio_lease_f)
vdpio_hello_lease(struct vdp_ctx *vdc, struct vdp_entry *this,
    struct vscarab *scarab)
{
	int r;

	VSCARAB_CHECK_NOTNULL(scarab);
	/* no free slot for the prefix iovec - ask to be called again */
	if (scarab->used == scarab->capacity)
		return (0);
	//lint -e{446} side effects in initializer - uh?
	VSCARAB_ADD_IOV_NORET(scarab, ((struct iovec)
	    {.iov_base = TRUST_ME(HELLO), .iov_len = strlen(HELLO)}));
	r = vdpio_pull(vdc, this, scarab);

	/* one-shot filter: remove ourselves after the single prefix */
	(void) VDPIO_Close1(vdc, this);

	/*
	 * the hello iovec was added in any case, so account for it.
	 * NOTE(review): on a pull failure (r < 0) the error code is
	 * discarded and 1 (the hello iovec alone) is reported - confirm
	 * against the vdpio_lease_f contract that this is intended.
	 */
	if (r < 0)
		r = 1;
	else
		r += 1;

	return (r);
}
88
89
/* debug delivery processor prepending "hello " to the body (vdpio only) */
static const struct vdp vdp_hello = {
	.name = "hello",
	.io_init = vdpio_hello_init,
	.io_lease = vdpio_hello_lease
};
94
95
/*
 * Error path for dbg_vai_deliver(): would need to close *v1lp and fail
 * the request with msg. Not implemented yet - any caller actually
 * reaching this path aborts via INCOMPL().
 */
static void
dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg)
{
	(void)req;
	(void)v1lp;
	(void)msg;
	INCOMPL();
}
104
105
static void dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err);
static void dbg_vai_deliverobj(struct worker *wrk, void *arg);
static void dbg_vai_lease(struct worker *wrk, void *arg);

/*
 * the http1 request task function, captured on first use in
 * dbg_vai_deliver() so the body-delivery tasks can hand the request
 * back to http1 processing when they are done
 */
static task_func_t *hack_http1_req = NULL;
110
111
// copied from cache_http_deliver.c, then split & modified
/*
 * Transport deliver callback: fix up Connection / Transfer-Encoding
 * headers, open the V1L output state, push either the vdpio chain
 * (hello + v1l) or, if the upgrade is refused, the classic VDP v1l
 * filter, write the response headers, then disembark the request onto
 * a fresh pool task (dbg_vai_lease for vdpio, dbg_vai_deliverobj for
 * the classic path) which delivers the body and resumes http1.
 */
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
dbg_vai_deliver(struct req *req, int sendbody)
{
	struct vrt_ctx ctx[1];
	struct v1l *v1l;
	int cap = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	/* reconcile req->doclose with the Connection response header */
	if (req->doclose == SC_NULL &&
	    http_HdrIs(req->resp, H_Connection, "close")) {
		req->doclose = SC_RESP_CLOSE;
	} else if (req->doclose != SC_NULL) {
		if (!http_HdrIs(req->resp, H_Connection, "close")) {
			http_Unset(req->resp, H_Connection);
			http_SetHeader(req->resp, "Connection: close");
		}
	} else if (!http_GetHdr(req->resp, H_Connection, NULL))
		http_SetHeader(req->resp, "Connection: keep-alive");

	CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC);

	v1l = V1L_Open(req->ws, &req->sp->fd, req->vsl,
	    req->t_prev + SESS_TMO(req->sp, send_timeout),
	    cache_param->http1_iovs);

	if (v1l == NULL) {
		dbg_vai_error(req, &v1l, "Failure to init v1d "
		    "(workspace_thread overflow)");
		return (VTR_D_DONE);
	}

	// Do not roll back req->ws upon V1L_Close()
	V1L_NoRollback(v1l);

	/* breakable block: body is entered at most once */
	while (sendbody) {
		/* no Content-Length: chunked on 1.1, EOF-close otherwise */
		if (!http_GetHdr(req->resp, H_Content_Length, NULL)) {
			if (req->http->protover == 11) {
				http_SetHeader(req->resp,
				    "Transfer-Encoding: chunked");
			} else {
				req->doclose = SC_TX_EOF;
			}
		}
		INIT_OBJ(ctx, VRT_CTX_MAGIC);
		VCL_Req2Ctx(ctx, req);
		/* cap > 0: the whole chain supports vdpio */
		cap = VDPIO_Upgrade(ctx, req->vdc);
		if (cap <= 0) {
			/* fall back to the classic VDP delivery path */
			if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
				dbg_vai_error(req, &v1l, "Failure to push v1d");
				return (VTR_D_DONE);
			}
			break;
		}
		cap = VDPIO_Push(ctx, req->vdc, req->ws, &vdp_hello, NULL);
		if (cap < 1) {
			dbg_vai_error(req, &v1l, "Failure to push hello");
			return (VTR_D_DONE);
		}
		cap = VDPIO_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l);
		if (cap < 1) {
			dbg_vai_error(req, &v1l, "Failure to push v1d (vdpio)");
			return (VTR_D_DONE);
		}
		break;
	}

	if (WS_Overflowed(req->ws)) {
		dbg_vai_error(req, &v1l, "workspace_client overflow");
		return (VTR_D_DONE);
	}

	if (WS_Overflowed(req->sp->ws)) {
		dbg_vai_error(req, &v1l, "workspace_session overflow");
		return (VTR_D_DONE);
	}

	if (WS_Overflowed(req->wrk->aws)) {
		dbg_vai_error(req, &v1l, "workspace_thread overflow");
		return (VTR_D_DONE);
	}

	req->acct.resp_hdrbytes += HTTP1_Write(v1l, req->resp, HTTP1_Resp);

	if (! sendbody) {
		dbg_vai_deliver_finish(req, &v1l, 0);
		return (VTR_D_DONE);
	}

	(void)V1L_Flush(v1l);

	/* capture the http1 task function so the body tasks can resume it */
	if (hack_http1_req == NULL)
		hack_http1_req = req->task->func;
	AN(hack_http1_req);

	if (cap > 0) {
		VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_lease cap %d", req->wrk, cap);
		req->task->func = dbg_vai_lease;
	}
	else {
		VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_deliverobj", req->wrk);
		req->task->func = dbg_vai_deliverobj;
	}
	req->task->priv = req;

	/* detach the request from this worker before handing it off */
	req->wrk = NULL;
	req->vdc->wrk = NULL;
	req->transport_priv = v1l;

	AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
	return (VTR_D_DISEMBARK);
}
226
227
static void v_matchproto_(task_func_t)
228 16
dbg_vai_deliverobj(struct worker *wrk, void *arg)
229
{
230
        struct req *req;
231
        struct v1l *v1l;
232
        const char *p;
233
        int err, chunked;
234
235 16
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
236 16
        CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
237 16
        v1l = req->transport_priv;
238 16
        req->transport_priv = NULL;
239 16
        AN(v1l);
240
241 16
        THR_SetRequest(req);
242 16
        VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_deliverobj", wrk);
243 16
        AZ(req->wrk);
244 16
        CNT_Embark(wrk, req);
245 16
        req->vdc->wrk = wrk;    // move to CNT_Embark?
246
247 16
        chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
248 16
        if (chunked)
249 0
                V1L_Chunked(v1l);
250 16
        err = VDP_DeliverObj(req->vdc, req->objcore);
251 16
        if (!err && chunked)
252 0
                V1L_EndChunk(v1l);
253 16
        dbg_vai_deliver_finish(req, &v1l, err);
254
255 16
        VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
256 16
        wrk->task->func = hack_http1_req;
257 16
        wrk->task->priv = req;
258 16
}
259
260
/*
 * copied from sml_notify
 *
 * condvar-based "more data available" notification between the VAI
 * storage layer and the delivery loop in dbg_vai_lease()
 */
struct dbg_vai_notify {
	unsigned		magic;
#define DBG_VAI_NOTIFY_MAGIC	0xa0154ed5
	unsigned		hasmore;	// set by notify, consumed by wait
	pthread_mutex_t		mtx;
	pthread_cond_t		cond;
};
270
271
/* initialize a notify object: no data pending, fresh mutex and condvar */
static void
dbg_vai_notify_init(struct dbg_vai_notify *sn)
{

	INIT_OBJ(sn, DBG_VAI_NOTIFY_MAGIC);
	AZ(pthread_mutex_init(&sn->mtx, NULL));
	AZ(pthread_cond_init(&sn->cond, NULL));
}
279
280
/* destroy the mutex and condvar of a notify object */
static void
dbg_vai_notify_fini(struct dbg_vai_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC);
	AZ(pthread_mutex_destroy(&sn->mtx));
	AZ(pthread_cond_destroy(&sn->cond));
}
288
289
/*
 * VAI notify callback: flag that more data is available and wake up
 * dbg_vai_notify_wait(); the flag is written under the mutex.
 */
static void v_matchproto_(vai_notify_cb)
dbg_vai_notify(vai_hdl hdl, void *priv)
{
	struct dbg_vai_notify *sn;

	(void) hdl;
	CAST_OBJ_NOTNULL(sn, priv, DBG_VAI_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	sn->hasmore = 1;
	AZ(pthread_cond_signal(&sn->cond));
	AZ(pthread_mutex_unlock(&sn->mtx));

}
302
303
/* block until dbg_vai_notify() flags more data, then consume the flag */
static void
dbg_vai_notify_wait(struct dbg_vai_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	/* loop guards against spurious condvar wakeups */
	while (sn->hasmore == 0)
		AZ(pthread_cond_wait(&sn->cond, &sn->mtx));
	AN(sn->hasmore);
	sn->hasmore = 0;
	AZ(pthread_mutex_unlock(&sn->mtx));
}
315
316
/* hand the request task back to the http1 handler on this worker */
static void
dbg_vai_lease_done(struct worker *wrk, struct req *req)
{
	VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
	wrk->task->func = hack_http1_req;
	wrk->task->priv = req;
}
323
324
/*
 * Body-delivery task for the vdpio path: re-attach the disembarked
 * request to this worker, set up the notification object and the vdpio
 * session, then pull vscarabs from the filter chain and flush them
 * until the END flag is seen, blocking on the condvar whenever the
 * layer below has nothing to lease yet.
 */
static void v_matchproto_(task_func_t)
dbg_vai_lease(struct worker *wrk, void *arg)
{
	struct req *req;
	struct v1l *v1l;
	const char *p;
	unsigned flags = 0;
	int r, cap, err, chunked;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
	v1l = req->transport_priv;
	req->transport_priv = NULL;
	AN(v1l);

	THR_SetRequest(req);
	VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_lease", wrk);
	AZ(req->wrk);
	CNT_Embark(wrk, req);
	req->vdc->wrk = wrk;	// move to CNT_Embark?

	/* capacity negotiated by VDPIO_Upgrade() in dbg_vai_deliver() */
	cap = req->vdc->retval;
	req->vdc->retval = 0;
	assert(cap > 0);

	/* stack-allocated scarab (buffers) and scaret (returns) of size cap */
	VSCARAB_LOCAL(scarab, cap);
	VSCARET_LOCAL(scaret, cap);

	chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
	if (chunked)
		V1L_Chunked(v1l);

	struct dbg_vai_notify notify;
	dbg_vai_notify_init(&notify);

	if (VDPIO_Init(req->vdc, req->objcore, req->ws, dbg_vai_notify, &notify, scaret)) {
		dbg_vai_notify_fini(&notify);
		dbg_vai_deliver_finish(req, &v1l, 1);
		dbg_vai_lease_done(wrk, req);
		return;
	}

	err = 0;
	do {
		r = vdpio_pull(req->vdc, NULL, scarab);
		flags = scarab->flags; // because vdpio_return_vscarab
		VSLb(req->vsl, SLT_Debug, "%d = vdpio_pull()", r);
		(void)V1L_Flush(v1l);
		vdpio_return_vscarab(req->vdc, scarab);

		/* nothing leasable right now: return leases, wait for notify */
		if (r == -ENOBUFS || r == -EAGAIN) {
			VDPIO_Return(req->vdc);
			dbg_vai_notify_wait(&notify);
		}
		else if (r < 0) {
			err = r;
			break;
		}
	} while ((flags & VSCARAB_F_END) == 0);

	if (!err && chunked)
		V1L_EndChunk(v1l);
	dbg_vai_deliver_finish(req, &v1l, err);
	VDPIO_Fini(req->vdc);
	dbg_vai_notify_fini(&notify);
	dbg_vai_lease_done(wrk, req);
}
391
392
static void
393 80
dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err)
394
{
395
        stream_close_t sc;
396
        uint64_t bytes;
397
398 80
        sc = V1L_Close(v1lp, &bytes);
399
400 80
        if (req->vdc->vai_hdl != NULL)
401 64
                req->acct.resp_bodybytes += VDPIO_Close(req->vdc, req->objcore, req->boc);
402 80
        req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);
403
404 80
        if (sc == SC_NULL && err && req->sp->fd >= 0)
405 0
                sc = SC_REM_CLOSE;
406 80
        if (sc != SC_NULL)
407 0
                Req_Fail(req, sc);
408 80
}
409
410
/* the debug transport: a copy of http1 with the deliver callback replaced */
static struct transport DBG_transport;

/*
 * One-time setup: clone the built-in http1 transport and override its
 * name and deliver callback with the VAI debug implementation.
 */
void
debug_transport_vai_init(void)
{
	DBG_transport = HTTP1_transport;
	DBG_transport.name = "DBG VAI";
	DBG_transport.deliver = dbg_vai_deliver;
}
419
420
void
421 80
debug_transport_vai_use(VRT_CTX)
422
{
423
        struct req *req;
424
425 80
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
426 80
        req = ctx->req;
427 80
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
428
429 80
        if (req->transport != &HTTP1_transport) {
430 0
                VRT_fail(ctx, "Only works on built-in http1 transport");
431 0
                return;
432
        }
433 80
        AZ(req->transport_priv);
434 80
        req->transport = &DBG_transport;
435 80
}