varnish-cache/vmod/vmod_debug_transport_vai.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * Copyright 2024 UPLEX - Nils Goroll Systemoptimierung
4
 * All rights reserved.
5
 *
6
 * Authors: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *          Nils Goroll <slink@uplex.de>
8
 *
9
 * SPDX-License-Identifier: BSD-2-Clause
10
 *
11
 * Redistribution and use in source and binary forms, with or without
12
 * modification, are permitted provided that the following conditions
13
 * are met:
14
 * 1. Redistributions of source code must retain the above copyright
15
 *    notice, this list of conditions and the following disclaimer.
16
 * 2. Redistributions in binary form must reproduce the above copyright
17
 *    notice, this list of conditions and the following disclaimer in the
18
 *    documentation and/or other materials provided with the distribution.
19
 *
20
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
24
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30
 * SUCH DAMAGE.
31
 */
32
33
#include "config.h"
34
35
#include "cache/cache_varnishd.h"
36
37
#include "cache/cache_filter.h"
38
#include "cache/cache_transport.h"
39
#include "http1/cache_http1.h"
40
41
#include "vmod_debug.h"
42
43
#define HELLO "hello "
44
45
/*
 * vdpio_init_f for the "hello" VDP: account for the HELLO prefix that
 * vdpio_hello_lease() will inject before the body.  When the content
 * length is known, grow *vdc->clen by strlen(HELLO) and rewrite the
 * Content-Length header accordingly; an unknown length (*clen < 0) is
 * left untouched.  Returns the pull capacity unchanged.
 */
static int v_matchproto_(vdpio_init_f)
46 48
vdpio_hello_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity)
47
{
48
49 48
        (void)ctx;
50 48
        (void)priv;
51
52 48
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
53 48
        AN(vdc->clen);
54
55 48
        if (*vdc->clen < 0)
56 1
                return (capacity);	/* unknown length: no header fixup */
57
58 47
        *vdc->clen += strlen(HELLO);
59 47
        http_Unset(vdc->hp, H_Content_Length);
60 47
        http_PrintfHeader(vdc->hp, "Content-Length: %jd", *vdc->clen);
61 47
        return (capacity);
62 48
}
63
64
/*
 * vdpio_lease_f for the "hello" VDP: add one iovec holding the constant
 * HELLO string to the scarab, pull the remaining leases from the next
 * layer, then retire this VDP entry via VDPIO_Close1() so the prefix is
 * emitted only once.  Returns the number of iovecs contributed.
 */
static int v_matchproto_(vdpio_lease_f)
65 48
vdpio_hello_lease(struct vdp_ctx *vdc, struct vdp_entry *this,
66
    struct vscarab *scarab)
67
{
68
        int r;
69
70 48
        VSCARAB_CHECK_NOTNULL(scarab);
71 48
        if (scarab->used == scarab->capacity)
72 0
                return (0);
73
        //lint -e{446} side effects in initializer - uh?
74 48
        VSCARAB_ADD_IOV_NORET(scarab, ((struct iovec)
75
            {.iov_base = TRUST_ME(HELLO), .iov_len = strlen(HELLO)}));
76 48
        r = vdpio_pull(vdc, this, scarab);
77
78 48
        (void) VDPIO_Close1(vdc, this);
79
80
        // return error from pull
        // NOTE(review): a negative r (pull error) is mapped to 1 here, so the
        // error is masked and only the hello iov is reported — confirm intended
81 48
        if (r < 0)
82 0
                r = 1;
83
        else
84 48
                r += 1;
85
86 48
        return (r);
87 48
}
88
89
/* Debug delivery processor: prepends HELLO to the delivered body. */
static const struct vdp vdp_hello = {
90
        .name = "hello",
91
        .io_init = vdpio_hello_init,
92
        .io_lease = vdpio_hello_lease
93
};
94
95
/*
 * Error-path placeholder: deliberately unimplemented — INCOMPL()
 * aborts if any of the (currently unexercised) failure paths in
 * dbg_vai_deliver() is ever reached.
 */
static void
96 0
dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg)
97
{
98
99 0
        (void)req;
100 0
        (void)v1lp;
101 0
        (void)msg;
102 0
        INCOMPL();
103 0
}
104
105
static void dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err);
106
static void dbg_vai_deliverobj(struct worker *wrk, void *arg);
107
static void dbg_vai_lease(struct worker *wrk, void *arg);
108
109
static task_func_t *hack_http1_req = NULL;
110
111
// copied from cache_http_deliver.c, then split & modified
112
/*
 * vtr_deliver_f for the debug VAI transport, copied from the http1
 * deliverer and split: fix up the Connection/framing headers, open the
 * V1L output on the thread workspace, push the "hello" and v1l delivery
 * processors, write the response headers, then disembark the request
 * and deliver the body on a freshly scheduled pool task —
 * dbg_vai_lease when the VDPIO upgrade succeeded (cap > 0),
 * dbg_vai_deliverobj for the classic VDP path otherwise.
 */
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
113 48
dbg_vai_deliver(struct req *req, int sendbody)
114
{
115
        struct vrt_ctx ctx[1];
116
        struct v1l *v1l;
117 48
        int cap = 0;
118
119 48
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
120 48
        CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
121 48
        CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
122
        /* Reconcile req->doclose with the Connection response header. */
123 48
        if (req->doclose == SC_NULL &&
124 48
            http_HdrIs(req->resp, H_Connection, "close")) {
125 0
                req->doclose = SC_RESP_CLOSE;
126 48
        } else if (req->doclose != SC_NULL) {
127 0
                if (!http_HdrIs(req->resp, H_Connection, "close")) {
128 0
                        http_Unset(req->resp, H_Connection);
129 0
                        http_SetHeader(req->resp, "Connection: close");
130 0
                }
131 48
        } else if (!http_GetHdr(req->resp, H_Connection, NULL))
132 48
                http_SetHeader(req->resp, "Connection: keep-alive");
133
134 48
        CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC);
135
136 96
        v1l = V1L_Open(req->ws, &req->sp->fd, req->vsl,
137 48
            req->t_prev + SESS_TMO(req->sp, send_timeout),
138 48
            cache_param->http1_iovs);
139
140 48
        if (v1l == NULL) {
141 0
                dbg_vai_error(req, &v1l, "Failure to init v1d "
142
                    "(workspace_thread overflow)");
143 0
                return (VTR_D_DONE);
144
        }
145
146
        // Do not roll back req->ws upon V1L_Close()
147 48
        V1L_NoRollback(v1l);
148
        /* One-shot "loop": only entered when a body is sent, and only so
         * that the error paths can break out of the filter setup. */
149 48
        while (sendbody) {
150 48
                if (!http_GetHdr(req->resp, H_Content_Length, NULL)) {
151 1
                        if (req->http->protover == 11) {
152 1
                                http_SetHeader(req->resp,
153
                                    "Transfer-Encoding: chunked");
154 1
                        } else {
155 0
                                req->doclose = SC_TX_EOF;
156
                        }
157 1
                }
158 48
                INIT_OBJ(ctx, VRT_CTX_MAGIC);
159 48
                VCL_Req2Ctx(ctx, req);
160 48
                cap = VDPIO_Upgrade(ctx, req->vdc);
161 48
                if (cap <= 0) {
                        /* no vdpio support: fall back to classic VDP */
162 0
                        if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
163 0
                                dbg_vai_error(req, &v1l, "Failure to push v1d");
164 0
                                return (VTR_D_DONE);
165
                        }
166 0
                        break;
167
                }
168 48
                cap = VDPIO_Push(ctx, req->vdc, req->ws, &vdp_hello, NULL);
169 48
                if (cap < 1) {
170 0
                        dbg_vai_error(req, &v1l, "Failure to push hello");
171 0
                        return (VTR_D_DONE);
172
                }
173 48
                cap = VDPIO_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l);
174 48
                if (cap < 1) {
175 0
                        dbg_vai_error(req, &v1l, "Failure to push v1d (vdpio)");
176 0
                        return (VTR_D_DONE);
177
                }
178 48
                break;
179
        }
180
181 48
        if (WS_Overflowed(req->ws)) {
182 0
                dbg_vai_error(req, &v1l, "workspace_client overflow");
183 0
                return (VTR_D_DONE);
184
        }
185
186 48
        if (WS_Overflowed(req->sp->ws)) {
187 0
                dbg_vai_error(req, &v1l, "workspace_session overflow");
188 0
                return (VTR_D_DONE);
189
        }
190
191 48
        if (WS_Overflowed(req->wrk->aws)) {
192 0
                dbg_vai_error(req, &v1l, "workspace_thread overflow");
193 0
                return (VTR_D_DONE);
194
        }
195
196 48
        req->acct.resp_hdrbytes += HTTP1_Write(v1l, req->resp, HTTP1_Resp);
197
198 48
        if (! sendbody) {
199 0
                dbg_vai_deliver_finish(req, &v1l, 0);
200 0
                return (VTR_D_DONE);
201
        }
202
203 48
        (void)V1L_Flush(v1l);
204
        /* Capture the stock http1 request task function once, so the body
         * delivery tasks can resume it when they are done. */
205 48
        if (hack_http1_req == NULL)
206 1
                hack_http1_req = req->task->func;
207 48
        AN(hack_http1_req);
208
209 48
        if (cap > 0) {
210 48
                VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_lease cap %d", req->wrk, cap);
211 48
                req->task->func = dbg_vai_lease;
212 48
        }
213
        else {
214 0
                VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_deliverobj", req->wrk);
215 0
                req->task->func = dbg_vai_deliverobj;
216
        }
217 48
        req->task->priv = req;
218
        /* Ownership handoff: the scheduled task re-embarks the request and
         * picks up v1l from transport_priv. */
219 48
        req->wrk = NULL;
220 48
        req->vdc->wrk = NULL;
221 48
        req->transport_priv = v1l;
222
223 48
        AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
224 48
        return (VTR_D_DISEMBARK);
225 48
}
226
227
/*
 * Pool task: deliver the body via the classic VDP_DeliverObj() path
 * (used when the VDPIO upgrade did not happen), with http1 chunked
 * framing when Transfer-Encoding: chunked was set, then resume the
 * regular http1 request task on this worker.
 */
static void v_matchproto_(task_func_t)
228 0
dbg_vai_deliverobj(struct worker *wrk, void *arg)
229
{
230
        struct req *req;
231
        struct v1l *v1l;
232
        const char *p;
233
        int err, chunked;
234
235 0
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
236 0
        CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
237 0
        v1l = req->transport_priv;
238 0
        req->transport_priv = NULL;
239 0
        AN(v1l);
240
        /* Re-attach the disembarked request to this worker. */
241 0
        THR_SetRequest(req);
242 0
        VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_deliverobj", wrk);
243 0
        AZ(req->wrk);
244 0
        CNT_Embark(wrk, req);
245 0
        req->vdc->wrk = wrk;    // move to CNT_Embark?
246
247 0
        chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
248 0
        if (chunked)
249 0
                V1L_Chunked(v1l);
250 0
        err = VDP_DeliverObj(req->vdc, req->objcore);
251 0
        if (!err && chunked)
252 0
                V1L_EndChunk(v1l);
253 0
        dbg_vai_deliver_finish(req, &v1l, err);
254
255 0
        VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
256 0
        wrk->task->func = hack_http1_req;
257 0
        wrk->task->priv = req;
258 0
}
259
260
/*
261
 * copied from sml_notify
262
 */
263
/* Condvar-based wakeup used to wait for more VAI data/buffers. */
struct dbg_vai_notify {
264
        unsigned                magic;
265
#define DBG_VAI_NOTIFY_MAGIC    0xa0154ed5
266
        unsigned                hasmore;	/* set by notify, consumed by wait */
267
        pthread_mutex_t         mtx;
268
        pthread_cond_t          cond;
269
};
270
271
/* Set up mutex and condvar; hasmore starts at zero via INIT_OBJ(). */
static void
272 48
dbg_vai_notify_init(struct dbg_vai_notify *sn)
273
{
274
275 48
        INIT_OBJ(sn, DBG_VAI_NOTIFY_MAGIC);
276 48
        AZ(pthread_mutex_init(&sn->mtx, NULL));
277 48
        AZ(pthread_cond_init(&sn->cond, NULL));
278 48
}
279
280
/* Destroy the notify primitives; no waiter may remain at this point. */
static void
281 48
dbg_vai_notify_fini(struct dbg_vai_notify *sn)
282
{
283
284 48
        CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC);
285 48
        AZ(pthread_mutex_destroy(&sn->mtx));
286 48
        AZ(pthread_cond_destroy(&sn->cond));
287 48
}
288
289
/*
 * vai_notify_cb: invoked when the VAI handle has more data or buffer
 * capacity available; sets hasmore and wakes dbg_vai_notify_wait().
 */
static void v_matchproto_(vai_notify_cb)
290 2
dbg_vai_notify(vai_hdl hdl, void *priv)
291
{
292
        struct dbg_vai_notify *sn;
293
294 2
        (void) hdl;
295 2
        CAST_OBJ_NOTNULL(sn, priv, DBG_VAI_NOTIFY_MAGIC);
296 2
        AZ(pthread_mutex_lock(&sn->mtx));
297 2
        sn->hasmore = 1;
298 2
        AZ(pthread_cond_signal(&sn->cond));
299 2
        AZ(pthread_mutex_unlock(&sn->mtx));
300
301 2
}
302
303
/* Block until dbg_vai_notify() signalled, then consume the flag. */
static void
304 2
dbg_vai_notify_wait(struct dbg_vai_notify *sn)
305
{
306
307 2
        CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC);
308 2
        AZ(pthread_mutex_lock(&sn->mtx));
309 4
        while (sn->hasmore == 0)
310 2
                AZ(pthread_cond_wait(&sn->cond, &sn->mtx));
311 2
        AN(sn->hasmore);
312 2
        sn->hasmore = 0;
313 2
        AZ(pthread_mutex_unlock(&sn->mtx));
314 2
}
315
316
/* Point the worker's task back at the stock http1 request function. */
static void
317 48
dbg_vai_lease_done(struct worker *wrk, struct req *req)
318
{
319 48
        VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
320 48
        wrk->task->func = hack_http1_req;
321 48
        wrk->task->priv = req;
322 48
}
323
324
/*
 * Pool task: deliver the body through the VDPIO lease interface.
 * Repeatedly pull scarab leases from the filter chain and flush them
 * via V1L until VSCARAB_F_END is seen; on -ENOBUFS/-EAGAIN, hand back
 * outstanding leases (VDPIO_Return) and sleep on the notify condvar
 * until the storage layer signals more data/buffers.
 */
static void v_matchproto_(task_func_t)
325 48
dbg_vai_lease(struct worker *wrk, void *arg)
326
{
327
        struct req *req;
328
        struct v1l *v1l;
329
        const char *p;
330 48
        unsigned flags = 0;
331
        int r, cap, err, chunked;
332
333 48
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
334 48
        CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
335 48
        v1l = req->transport_priv;
336 48
        req->transport_priv = NULL;
337 48
        AN(v1l);
338
        /* Re-attach the disembarked request to this worker. */
339 48
        THR_SetRequest(req);
340 48
        VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_lease", wrk);
341 48
        AZ(req->wrk);
342 48
        CNT_Embark(wrk, req);
343 48
        req->vdc->wrk = wrk;    // move to CNT_Embark?
344
        /* Capacity negotiated by VDPIO_Upgrade() in dbg_vai_deliver(). */
345 48
        cap = req->vdc->retval;
346 48
        req->vdc->retval = 0;
347 48
        assert(cap > 0);
348
349 48
        VSCARAB_LOCAL(scarab, cap);
350 48
        VSCARET_LOCAL(scaret, cap);
351
352 48
        chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
353 48
        if (chunked)
354 1
                V1L_Chunked(v1l);
355
356
        struct dbg_vai_notify notify;
357 48
        dbg_vai_notify_init(&notify);
358
359 48
        if (VDPIO_Init(req->vdc, req->objcore, req->ws, dbg_vai_notify, &notify, scaret)) {
360 0
                dbg_vai_notify_fini(&notify);
361 0
                dbg_vai_deliver_finish(req, &v1l, 1);
362 0
                dbg_vai_lease_done(wrk, req);
363 0
                return;
364
        }
365
366 48
        err = 0;
367 48
        do {
368 100
                r = vdpio_pull(req->vdc, NULL, scarab);
369 100
                flags = scarab->flags; // because vdpio_return_vscarab
370 100
                VSLb(req->vsl, SLT_Debug, "%d = vdpio_pull()", r);
371 100
                (void)V1L_Flush(v1l);
372 100
                vdpio_return_vscarab(req->vdc, scarab);
373
                /* Out of buffers / no data yet: give leases back and wait. */
374 100
                if (r == -ENOBUFS || r == -EAGAIN) {
375 2
                        VDPIO_Return(req->vdc);
376 2
                        dbg_vai_notify_wait(&notify);
377 2
                }
378 98
                else if (r < 0) {
379 0
                        err = r;
380 0
                        break;
381
                }
382 100
        } while ((flags & VSCARAB_F_END) == 0);
383
384 48
        if (!err && chunked)
385 1
                V1L_EndChunk(v1l);
386 48
        dbg_vai_deliver_finish(req, &v1l, err);
387 48
        VDPIO_Fini(req->vdc);
388 48
        dbg_vai_notify_fini(&notify);
389 48
        dbg_vai_lease_done(wrk, req);
390 48
}
391
392
/*
 * Common tail of body delivery: close the V1L output, close the vdpio
 * (when initialized) and vdp filter chains while accounting body
 * bytes, and fail the request if an error occurred without an earlier
 * close reason.
 */
static void
393 48
dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err)
394
{
395
        stream_close_t sc;
396
        uint64_t bytes;
397
398 48
        sc = V1L_Close(v1lp, &bytes);
399
400 48
        if (req->vdc->vai_hdl != NULL)
401 48
                req->acct.resp_bodybytes += VDPIO_Close(req->vdc, req->objcore, req->boc);
402 48
        req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);
403
404 48
        if (sc == SC_NULL && err && req->sp->fd >= 0)
405 0
                sc = SC_REM_CLOSE;
406 48
        if (sc != SC_NULL)
407 0
                Req_Fail(req, sc);
408 48
}
409
410
static struct transport DBG_transport;
411
412
/* Clone the built-in http1 transport, overriding name and deliver. */
void
413 100
debug_transport_vai_init(void)
414
{
415 100
        DBG_transport = HTTP1_transport;
416 100
        DBG_transport.name = "DBG VAI";
417 100
        DBG_transport.deliver = dbg_vai_deliver;
418 100
}
419
420
/*
 * Switch the current request over to the debug VAI transport; only
 * permitted while the built-in http1 transport is active, otherwise
 * the VCL is failed.
 */
void
421 48
debug_transport_vai_use(VRT_CTX)
422
{
423
        struct req *req;
424
425 48
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
426 48
        req = ctx->req;
427 48
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
428
429 48
        if (req->transport != &HTTP1_transport) {
430 0
                VRT_fail(ctx, "Only works on built-in http1 transport");
431 0
                return;
432
        }
433 48
        AZ(req->transport_priv);
434 48
        req->transport = &DBG_transport;
435 48
}