varnish-cache/vmod/vmod_debug_acl.c
/*-
 * Copyright (c) 2012-2019 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

// #include "vdef.h"
//#include "vas.h"
#include "cache/cache.h"
#include "vend.h"
#include "vsa.h"
#include "vsb.h"
#include "vsha256.h"
#include "vtcp.h"
#include "vtim.h"
#include "vcc_debug_if.h"

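/*
 * Trivial ACL helpers: return no ACL at all, pass an ACL argument through
 * unchanged, and match an IP against an ACL via VRT_acl_match().
 */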
VCL_ACL v_matchproto_(td_debug_null_acl)
xyzzy_null_acl(VRT_CTX)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        return (NULL);
}

VCL_ACL v_matchproto_(td_debug_acl)
xyzzy_acl(VRT_CTX, VCL_ACL acl)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        return (acl);
}

VCL_BOOL v_matchproto_(td_debug_match_acl)
xyzzy_match_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip)
{

        CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
        assert(VSA_Sane(ip));

        return (VRT_acl_match(ctx, acl, ip));
}

/*
 * The code below is more intimate with VSA than anything is supposed to be.
 */
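
/*
 * A sweep drives a probe address from ip0 towards ip1 in increments of
 * `step`: `probe` is a private heap copy of ip0 whose address bytes
 * (reached through `probe_p`) are re-encoded on every step, `that` holds
 * the IPv4 address or the low 64 bits of the IPv6 address as an integer,
 * and `reset` remembers the starting value so a sweep can be rewound.
 */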

struct acl_sweep {
        int                     family;
        const uint8_t           *ip0_p;
        const uint8_t           *ip1_p;
        struct suckaddr         *probe;
        uint8_t                 *probe_p;
        VCL_INT                 step;
        uint64_t                reset;
        uint64_t                that;
        uint64_t                count;
};

static void
reset_sweep(struct acl_sweep *asw)
{
        asw->that = asw->reset;
}
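
/*
 * Check that both endpoints are in the same address family and that the
 * range is not reversed, then build the mutable probe copy of ip0.
 * Returns -1 after VRT_fail() on error, 0 on success.
 */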

static int
setup_sweep(VRT_CTX, struct acl_sweep *asw, VCL_IP ip0, VCL_IP ip1,
    VCL_INT step)
{
        int fam0, fam1;
        const uint8_t *ptr;

        AN(asw);
        memset(asw, 0, sizeof *asw);

        AN(ip0);
        AN(ip1);
        fam0 = VSA_GetPtr(ip0, &asw->ip0_p);
        fam1 = VSA_GetPtr(ip1, &asw->ip1_p);
        if (fam0 != fam1) {
                VRT_fail(ctx, "IPs have different families (0x%x vs 0x%x)",
                    fam0, fam1);
                return (-1);
        }
        asw->family = fam0;
        if (asw->family == PF_INET) {
                if (memcmp(asw->ip0_p, asw->ip1_p, 4) > 0) {
                        VRT_fail(ctx, "Sweep: ipv4.end < ipv4.start");
                        return (-1);
                }
                asw->reset = vbe32dec(asw->ip0_p);
        } else {
                if (memcmp(asw->ip0_p, asw->ip1_p, 16) > 0) {
                        VRT_fail(ctx, "Sweep: ipv6.end < ipv6.start");
                        return (-1);
                }
                asw->reset = vbe64dec(asw->ip0_p + 8);
        }
        asw->that = asw->reset;

        /* Don't try this at home */
        asw->probe = malloc(vsa_suckaddr_len);
        AN(asw->probe);
        memcpy(asw->probe, ip0, vsa_suckaddr_len);
        (void)VSA_GetPtr(asw->probe, &ptr);
        asw->probe_p = ((uint8_t*)(asw->probe)) + (ptr - (uint8_t*)asw->probe);

        asw->step = step;

        return (0);
}

static void
cleanup_sweep(struct acl_sweep *asw)
{
        free(asw->probe);
        memset(asw, 0, sizeof *asw);
}
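
/*
 * Advance the probe by `step`, re-encode it into the probe suckaddr and
 * compare against ip1; the result goes positive once the probe has
 * passed the end of the range.
 */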

static int
step_sweep(struct acl_sweep *asw)
{

        AN(asw);
        asw->count++;
        asw->that += asw->step;
        if (asw->family == PF_INET) {
                vbe32enc(asw->probe_p, asw->that);
                return (memcmp(asw->probe_p, asw->ip1_p, 4));
        } else {
                vbe64enc(asw->probe_p + 8, asw->that);
                return (memcmp(asw->probe_p, asw->ip1_p, 16));
        }
}
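
/*
 * Match each probe from ip0, stepping by `step`, until it passes ip1,
 * logging the results as SLT_Debug lines of up to 64 probes each
 * ('X' = match, '-' = miss), and return a SHA256 digest of those lines
 * as a workspace-allocated blob (NULL on setup failure or workspace
 * exhaustion).
 */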

VCL_BLOB
xyzzy_sweep_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip0, VCL_IP ip1, VCL_INT step)
{
        struct acl_sweep asw[1];
        int i, j;
        struct vsb *vsb;
        char abuf[VTCP_ADDRBUFSIZE];
        char pbuf[VTCP_PORTBUFSIZE];
        unsigned char digest[VSHA256_DIGEST_LENGTH];
        struct VSHA256Context vsha[1];
        struct vrt_blob *b;
        ssize_t sz;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(acl);
        AN(ip0);
        AN(ip1);
        assert(step > 0);
        if (setup_sweep(ctx, asw, ip0, ip1, step))
                return (NULL);

        vsb = VSB_new_auto();
        AN(vsb);

        VSHA256_Init(vsha);
        for (j = 0; ; j++) {
                if ((j & 0x3f) == 0x00) {
                        VTCP_name(asw->probe, abuf, sizeof abuf,
                            pbuf, sizeof pbuf);
                        VSB_printf(vsb, "Sweep: %-15s", abuf);
                }
                i = VRT_acl_match(ctx, acl, asw->probe);
                assert(0 <= i && i <= 1);
                VSB_putc(vsb, "-X"[i]);
                if ((j & 0x3f) == 0x3f) {
                        AZ(VSB_finish(vsb));
                        VSLbs(ctx->vsl, SLT_Debug, TOSTRAND(VSB_data(vsb)));
                        sz = VSB_len(vsb);
                        assert(sz > 0);
                        VSHA256_Update(vsha, VSB_data(vsb), sz);
                        VSB_clear(vsb);
                }
                if (step_sweep(asw) > 0)
                        break;
        }
        if (VSB_len(vsb)) {
                AZ(VSB_finish(vsb));
                VSLbs(ctx->vsl, SLT_Debug, TOSTRAND(VSB_data(vsb)));
                sz = VSB_len(vsb);
                assert(sz > 0);
                VSHA256_Update(vsha, VSB_data(vsb), sz);
                VSB_clear(vsb);
        }
        VSB_destroy(&vsb);

        VSHA256_Final(digest, vsha);
        b = WS_Alloc(ctx->ws, sizeof *b + sizeof digest);
        if (b != NULL) {
                memcpy(b + 1, digest, sizeof digest);
                b->blob = b + 1;
                b->len = sizeof digest;
        }
        cleanup_sweep(asw);
        return (b);
}
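
/*
 * Time VRT_acl_match() over the ip0..ip1 sweep: one warm-up pass, then
 * `turnus` timed passes; logs the totals and returns the average time
 * per matched IP as a duration (-1 if the sweep cannot be set up).
 */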

VCL_DURATION
xyzzy_time_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip0, VCL_IP ip1,
    VCL_INT step, VCL_INT turnus)
{
        struct acl_sweep asw[1];
        vtim_mono t0, t1;
        vtim_dur d;
        VCL_INT cnt;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(acl);
        AN(ip0);
        AN(ip1);
        assert(step > 0);
        assert(turnus > 0);

        if (setup_sweep(ctx, asw, ip0, ip1, step))
                return (-1);
        do {
                (void)VRT_acl_match(ctx, acl, asw->probe);
        } while (step_sweep(asw) <= 0);
        asw->count = 0;
        t0 = VTIM_mono();
        for (cnt = 0; cnt < turnus; cnt++) {
                reset_sweep(asw);
                do {
                        (void)VRT_acl_match(ctx, acl, asw->probe);
                } while (step_sweep(asw) <= 0);
        }
        t1 = VTIM_mono();
        cnt = asw->count;
        assert(cnt > 0);
        d = (t1 - t0) / cnt;
        VSLb(ctx->vsl, SLT_Debug,
            "Timed ACL: %.9f -> %.9f = %.9f %.9f/round, %.9f/IP %ju IPs",
            t0, t1, t1 - t0, (t1-t0) / turnus, d, (intmax_t)cnt);
        cleanup_sweep(asw);
        return (d);
}
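
/*
 * The aclobj object just stashes an ACL reference at vcl_init time so
 * that .get() can return it later.  A hypothetical VCL sketch (assuming
 * this vmod is imported as "debug"; all names are illustrative only):
 *
 *      sub vcl_init {
 *              new stash = debug.aclobj(my_acl);
 *      }
 *      sub vcl_recv {
 *              if (debug.match_acl(stash.get(), client.ip)) { ... }
 *      }
 */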

struct xyzzy_debug_aclobj {
        unsigned                        magic;
#define VMOD_DEBUG_ACLOBJ_MAGIC 0xac10ac10
        char *                          vcl_name;
        VCL_ACL                         acl;
};

VCL_VOID v_matchproto_(td_xyzzy_debug_aclobj__init)
xyzzy_aclobj__init(VRT_CTX, struct VPFX(debug_aclobj) **op,
    const char *vcl_name, VCL_ACL acl)
{
        struct VPFX(debug_aclobj) *o;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(op);
        AZ(*op);
        ALLOC_OBJ(o, VMOD_DEBUG_ACLOBJ_MAGIC);
        AN(o);
        REPLACE(o->vcl_name, vcl_name);
        o->acl = acl;
        *op = o;
}

VCL_VOID v_matchproto_(td_xyzzy_debug_aclobj__fini)
xyzzy_aclobj__fini(struct VPFX(debug_aclobj) **op)
{
        struct VPFX(debug_aclobj) *o;

        TAKE_OBJ_NOTNULL(o, op, VMOD_DEBUG_ACLOBJ_MAGIC);
        REPLACE(o->vcl_name, NULL);
        FREE_OBJ(o);
}

VCL_ACL v_matchproto_(td_xyzzy_debug_aclobj_get)
xyzzy_aclobj_get(VRT_CTX, struct VPFX(debug_aclobj) *o)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(o, VMOD_DEBUG_ACLOBJ_MAGIC);
        return (o->acl);
}