varnish-cache/vmod/vmod_debug_acl.c
/*-
 * Copyright (c) 2012-2019 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

// #include "vdef.h"
//#include "vas.h"
#include "cache/cache.h"
#include "vend.h"
#include "vsa.h"
#include "vsb.h"
#include "vsha256.h"
#include "vtcp.h"
#include "vtim.h"
#include "vcc_debug_if.h"

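/*
 * Trivial ACL pass-throughs for test cases: return no ACL at all,
 * return the ACL argument unchanged, and match a single IP against
 * an ACL.
 */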
VCL_ACL v_matchproto_(td_debug_null_acl)
xyzzy_null_acl(VRT_CTX)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        return (NULL);
}

VCL_ACL v_matchproto_(td_debug_acl)
xyzzy_acl(VRT_CTX, VCL_ACL acl)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        return (acl);
}

VCL_BOOL v_matchproto_(td_debug_match_acl)
xyzzy_match_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        assert(VSA_Sane(ip));

        return (VRT_acl_match(ctx, acl, ip));
}

/*
 * The code below is more intimate with VSA than anything is supposed to.
 */

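/*
 * State for sweeping an address range: walk from ip0 to ip1 in
 * increments of 'step', writing the numeric address ('that', for IPv6
 * only the low 64 bits) directly into a private, writable suckaddr
 * ("probe").
 */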
struct acl_sweep {
        int                     family;
        const uint8_t           *ip0_p;
        const uint8_t           *ip1_p;
        struct suckaddr         *probe;
        uint8_t                 *probe_p;
        VCL_INT                 step;
        uint64_t                reset;
        uint64_t                that;
        uint64_t                count;
};

static void
reset_sweep(struct acl_sweep *asw)
{
        asw->that = asw->reset;
}

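/*
 * Validate that ip0 and ip1 share an address family and are ordered,
 * record the numeric start address and build the writable probe as a
 * copy of ip0.
 */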
static int
setup_sweep(VRT_CTX, struct acl_sweep *asw, VCL_IP ip0, VCL_IP ip1,
    VCL_INT step)
{
        int fam0, fam1;
        const uint8_t *ptr;

        AN(asw);
        memset(asw, 0, sizeof *asw);

        AN(ip0);
        AN(ip1);
        fam0 = VSA_GetPtr(ip0, &asw->ip0_p);
        fam1 = VSA_GetPtr(ip1, &asw->ip1_p);
        if (fam0 != fam1) {
                VRT_fail(ctx, "IPs have different families (0x%x vs 0x%x)",
                    fam0, fam1);
                return (-1);
        }

        asw->family = fam0;
        if (asw->family == PF_INET) {
                if (memcmp(asw->ip0_p, asw->ip1_p, 4) > 0) {
                        VRT_fail(ctx, "Sweep: ipv4.end < ipv4.start");
                        return (-1);
                }
                asw->reset = vbe32dec(asw->ip0_p);
        } else {
                if (memcmp(asw->ip0_p, asw->ip1_p, 16) > 0) {
                        VRT_fail(ctx, "Sweep: ipv6.end < ipv6.start");
                        return (-1);
                }
                asw->reset = vbe64dec(asw->ip0_p + 8);
        }
        asw->that = asw->reset;

        /* Don't try this at home */
        asw->probe = malloc(vsa_suckaddr_len);
        AN(asw->probe);
        memcpy(asw->probe, ip0, vsa_suckaddr_len);
        (void)VSA_GetPtr(asw->probe, &ptr);
        asw->probe_p = ((uint8_t*)(asw->probe)) + (ptr - (uint8_t*)asw->probe);

        asw->step = step;

        return (0);
}

static void
cleanup_sweep(struct acl_sweep *asw)
{
        free(asw->probe);
        memset(asw, 0, sizeof *asw);
}

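/*
 * Advance the probe by 'step' addresses and count it; returns a value
 * greater than zero once the probe has moved past ip1.
 */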
static int
step_sweep(struct acl_sweep *asw)
{

        AN(asw);
        asw->count++;
        asw->that += asw->step;
        if (asw->family == PF_INET) {
                vbe32enc(asw->probe_p, asw->that);
                return (memcmp(asw->probe_p, asw->ip1_p, 4));
        } else {
                vbe64enc(asw->probe_p + 8, asw->that);
                return (memcmp(asw->probe_p, asw->ip1_p, 16));
        }
}

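/*
 * Match every step'th address from ip0 to ip1 against the ACL, log the
 * results as rows of '-'/'X' characters (64 addresses per row) and
 * return a SHA256 digest of the logged text, so test cases can check a
 * compact fingerprint of the whole sweep.
 */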
VCL_BLOB
xyzzy_sweep_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip0, VCL_IP ip1, VCL_INT step)
{
        struct acl_sweep asw[1];
        int i, j;
        struct vsb *vsb;
        char abuf[VTCP_ADDRBUFSIZE];
        char pbuf[VTCP_PORTBUFSIZE];
        unsigned char digest[VSHA256_DIGEST_LENGTH];
        struct VSHA256Context vsha[1];
        struct vrt_blob *b;
        ssize_t sz;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(acl);
        AN(ip0);
        AN(ip1);
        assert(step > 0);
        if (setup_sweep(ctx, asw, ip0, ip1, step))
                return (NULL);

        vsb = VSB_new_auto();
        AN(vsb);

        VSHA256_Init(vsha);
        for (j = 0; ; j++) {
                if ((j & 0x3f) == 0x00) {
                        VTCP_name(asw->probe, abuf, sizeof abuf,
                            pbuf, sizeof pbuf);
                        VSB_printf(vsb, "Sweep: %-15s", abuf);
                }
                i = VRT_acl_match(ctx, acl, asw->probe);
                assert(0 <= i && i <= 1);
                VSB_putc(vsb, "-X"[i]);
                if ((j & 0x3f) == 0x3f) {
                        AZ(VSB_finish(vsb));
                        VSLbs(ctx->vsl, SLT_Debug, TOSTRAND(VSB_data(vsb)));
                        sz = VSB_len(vsb);
                        assert(sz > 0);
                        VSHA256_Update(vsha, VSB_data(vsb), sz);
                        VSB_clear(vsb);
                }
                if (step_sweep(asw) > 0)
                        break;
        }
        if (VSB_len(vsb)) {
                AZ(VSB_finish(vsb));
                VSLbs(ctx->vsl, SLT_Debug, TOSTRAND(VSB_data(vsb)));
                sz = VSB_len(vsb);
                assert(sz > 0);
                VSHA256_Update(vsha, VSB_data(vsb), sz);
                VSB_clear(vsb);
        }
        VSB_destroy(&vsb);

        VSHA256_Final(digest, vsha);
        b = WS_Alloc(ctx->ws, sizeof *b + sizeof digest);
        if (b != NULL) {
                memcpy(b + 1, digest, sizeof digest);
                b->blob = b + 1;
                b->len = sizeof digest;
        }
        cleanup_sweep(asw);
        return (b);
}

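/*
 * Time VRT_acl_match() over 'turnus' sweeps of the address range and
 * return the average duration per matched IP.
 */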
VCL_DURATION
xyzzy_time_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip0, VCL_IP ip1,
    VCL_INT step, VCL_INT turnus)
{
        struct acl_sweep asw[1];
        vtim_mono t0, t1;
        vtim_dur d;
        VCL_INT cnt;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(acl);
        AN(ip0);
        AN(ip1);
        assert(step > 0);
        assert(turnus > 0);

        if (setup_sweep(ctx, asw, ip0, ip1, step))
                return (-1);
        /* Warm-up pass, not timed */
        do {
                (void)VRT_acl_match(ctx, acl, asw->probe);
        } while (step_sweep(asw) <= 0);
        asw->count = 0;
        t0 = VTIM_mono();
        for (cnt = 0; cnt < turnus; cnt++) {
                reset_sweep(asw);
                do {
                        (void)VRT_acl_match(ctx, acl, asw->probe);
                } while (step_sweep(asw) <= 0);
        }
        t1 = VTIM_mono();
        cnt = asw->count;
        assert(cnt > 0);
        d = (t1 - t0) / cnt;
        VSLb(ctx->vsl, SLT_Debug,
            "Timed ACL: %.9f -> %.9f = %.9f %.9f/round, %.9f/IP %ju IPs",
            t0, t1, t1 - t0, (t1 - t0) / turnus, d, (uintmax_t)cnt);
        cleanup_sweep(asw);
        return (d);
}

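/*
 * VCL object wrapping an ACL so it can be handed back via the .get()
 * method.
 */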
struct xyzzy_debug_aclobj {
        unsigned                        magic;
#define VMOD_DEBUG_ACLOBJ_MAGIC 0xac10ac10
        char *                          vcl_name;
        VCL_ACL                 acl;
};

VCL_VOID v_matchproto_(td_xyzzy_debug_aclobj__init)
xyzzy_aclobj__init(VRT_CTX, struct VPFX(debug_aclobj) **op,
    const char *vcl_name, VCL_ACL acl)
{
        struct VPFX(debug_aclobj) *o;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(op);
        AZ(*op);
        ALLOC_OBJ(o, VMOD_DEBUG_ACLOBJ_MAGIC);
        AN(o);
        REPLACE(o->vcl_name, vcl_name);
        o->acl = acl;
        *op = o;
}

VCL_VOID v_matchproto_(td_xyzzy_debug_aclobj__fini)
xyzzy_aclobj__fini(struct VPFX(debug_aclobj) **op)
{
        struct VPFX(debug_aclobj) *o;

        TAKE_OBJ_NOTNULL(o, op, VMOD_DEBUG_ACLOBJ_MAGIC);
        REPLACE(o->vcl_name, NULL);
        FREE_OBJ(o);
}

VCL_ACL v_matchproto_(td_xyzzy_debug_aclobj_get)
xyzzy_aclobj_get(VRT_CTX, struct VPFX(debug_aclobj) *o)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(o, VMOD_DEBUG_ACLOBJ_MAGIC);
        return (o->acl);
}