r5443 - in branches/2.1/varnish-cache: . bin/varnishd bin/varnishtest/tests lib/libvcl
tfheen at varnish-cache.org
tfheen at varnish-cache.org
Wed Oct 20 15:44:17 CEST 2010
Author: tfheen
Date: 2010-10-20 15:44:17 +0200 (Wed, 20 Oct 2010)
New Revision: 5443
Added:
branches/2.1/varnish-cache/bin/varnishd/cache_dir_dns.c
branches/2.1/varnish-cache/bin/varnishtest/tests/v00029.vtc
branches/2.1/varnish-cache/bin/varnishtest/tests/v00030.vtc
branches/2.1/varnish-cache/lib/libvcl/vcc_dir_dns.c
Modified:
branches/2.1/varnish-cache/configure.ac
Log:
Add missing files for the DNS director
Added: branches/2.1/varnish-cache/bin/varnishd/cache_dir_dns.c
===================================================================
--- branches/2.1/varnish-cache/bin/varnishd/cache_dir_dns.c (rev 0)
+++ branches/2.1/varnish-cache/bin/varnishd/cache_dir_dns.c 2010-10-20 13:44:17 UTC (rev 5443)
@@ -0,0 +1,487 @@
+/*-
+ * Copyright (c) 2009 Redpill Linpro AS
+ * Copyright (c) 2010 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Kristian Lyngstol <kristian at redpill-linpro.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "config.h"
+
+#include "svnid.h"
+SVNID("$Id$")
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <netdb.h>
+#include <unistd.h>
+
+#include <stdio.h>
+#include <netinet/in.h>
+#include "shmlog.h"
+#include "cache.h"
+#include "cache_backend.h"
+#include "vrt.h"
+
+/*--------------------------------------------------------------------*/
+
+/* FIXME: Should eventually be a configurable variable. */
+#define VDI_DNS_MAX_CACHE 1024
+#define VDI_DNS_GROUP_MAX_BACKENDS 1024
+
+/* DNS Cache entry
+ */
+struct vdi_dns_hostgroup {
+ unsigned magic;
+#define VDI_DNSDIR_MAGIC 0x1bacab21
+ char *hostname;
+ struct director *hosts[VDI_DNS_GROUP_MAX_BACKENDS];
+ unsigned nhosts;
+ unsigned next_host; /* Next to use...*/
+ double ttl;
+ VTAILQ_ENTRY(vdi_dns_hostgroup) list;
+};
+
+struct vdi_dns {
+ unsigned magic;
+#define VDI_DNS_MAGIC 0x1337a178
+ struct director dir;
+ struct director **hosts;
+ unsigned nhosts;
+ VTAILQ_HEAD(_cachelist,vdi_dns_hostgroup) cachelist;
+ unsigned ncachelist;
+ pthread_rwlock_t rwlock;
+ const char *suffix;
+ double ttl;
+};
+
+
+
+/* Compare an IPv4 backend to an IPv4 addr/len */
+static int
+vdi_dns_comp_addrinfo4(const struct backend *bp,
+ const struct sockaddr_in *addr,
+ const socklen_t len)
+{
+ uint32_t u, p;
+ struct sockaddr_in *bps = (struct sockaddr_in *) bp->ipv4;
+
+ if (bp->ipv4len != len || len <= 0)
+ return 0;
+
+ u = addr->sin_addr.s_addr;
+ p = bps->sin_addr.s_addr;
+
+ return u == p;
+}
+
+/* Compare an IPv6 backend to an IPv6 addr/len */
+static int
+vdi_dns_comp_addrinfo6(const struct backend *bp,
+ struct sockaddr_in6 *addr,
+ const socklen_t len)
+{
+ uint8_t *u, *p;
+ int i;
+ struct sockaddr_in6 *bps = (struct sockaddr_in6 *) bp->ipv6;
+
+ if (bp->ipv6len != len || len <= 0)
+ return 0;
+
+ u = addr->sin6_addr.s6_addr;
+ p = bps->sin6_addr.s6_addr;
+
+ for (i=0; i < 16; i++) {
+ if (u[i] != p[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Check if a backends socket is the same as addr */
+static int
+vdi_dns_comp_addrinfo(const struct director *dir,
+ struct sockaddr *addr,
+ const socklen_t len)
+{
+ struct backend *bp;
+
+ bp = vdi_get_backend_if_simple(dir);
+ AN(bp);
+ if (addr->sa_family == PF_INET && bp->ipv4) {
+ return (vdi_dns_comp_addrinfo4(bp, (struct sockaddr_in *)
+ addr, len));
+ } else if (addr->sa_family == PF_INET6 && bp->ipv6) {
+ return (vdi_dns_comp_addrinfo6(bp, (struct sockaddr_in6 *)
+ addr, len));
+ }
+ return 0;
+}
+
+/* Pick a host from an existing hostgroup.
+ * Balance on round-robin if multiple backends are available and only pick
+ * healthy ones.
+ */
+static struct director *
+vdi_dns_pick_host(const struct sess *sp, struct vdi_dns_hostgroup *group) {
+ int initial, i, nhosts, current;
+ if (group->nhosts == 0)
+ return (NULL); // In case of error.
+ if (group->next_host >= group->nhosts)
+ group->next_host = 0;
+
+ /* Pick a healthy backend */
+ initial = group->next_host;
+ nhosts = group->nhosts;
+ for (i=0; i < nhosts; i++) {
+ if (i + initial >= nhosts)
+ current = i + initial - nhosts;
+ else
+ current = i + initial;
+ if (VBE_Healthy_sp(sp, group->hosts[current])) {
+ group->next_host = current+1;
+ return group->hosts[current];
+ }
+ }
+
+ return NULL;
+}
+
+/* Remove an item from the dns cache.
+ * If group is NULL, the oldest entry (the list tail) is popped.
+ * Remember locking.
+ */
+static void
+vdi_dns_pop_cache(struct vdi_dns *vs,
+ struct vdi_dns_hostgroup *group)
+{
+ if (group == NULL)
+ group = VTAILQ_LAST( &vs->cachelist, _cachelist );
+ assert(group != NULL);
+ free(group->hostname);
+ VTAILQ_REMOVE(&vs->cachelist, group, list);
+ FREE_OBJ(group);
+ vs->ncachelist--;
+}
+
+/* Dummy in case someone feels like optimizing it? meh...
+ */
+static inline int
+vdi_dns_groupmatch(const struct vdi_dns_hostgroup *group, const char *hostname)
+{
+ return !strcmp(group->hostname, hostname);
+}
+
+/* Search the cache for 'hostname' and put a backend-pointer as necessary,
+ * return true for cache hit. This could still be a NULL backend if we did
+ * a lookup earlier and didn't find a host (ie: cache failed too)
+ *
+ * if rwlock is true, the first timed out object found (if any) is popped
+ * and freed.
+ */
+static int
+vdi_dns_cache_has(const struct sess *sp,
+ struct vdi_dns *vs,
+ const char *hostname,
+ struct director **backend,
+ int rwlock)
+{
+ struct director *ret;
+ struct vdi_dns_hostgroup *hostgr;
+ struct vdi_dns_hostgroup *hostgr2;
+ VTAILQ_FOREACH_SAFE(hostgr, &vs->cachelist, list, hostgr2) {
+ CHECK_OBJ_NOTNULL(hostgr, VDI_DNSDIR_MAGIC);
+ if (hostgr->ttl <= sp->t_req) {
+ if (rwlock)
+ vdi_dns_pop_cache(vs, hostgr);
+ return 0;
+ }
+ if (vdi_dns_groupmatch(hostgr, hostname)) {
+ ret = (vdi_dns_pick_host(sp, hostgr));
+ *backend = ret;
+ if (*backend != NULL)
+ CHECK_OBJ_NOTNULL(*backend, DIRECTOR_MAGIC);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Add a newly cached item to the dns cache list.
+ * (Sorry for the list_add/_add confusion...)
+ */
+static void
+vdi_dns_cache_list_add(const struct sess *sp,
+ struct vdi_dns *vs,
+ struct vdi_dns_hostgroup *new)
+{
+ if (vs->ncachelist >= VDI_DNS_MAX_CACHE) {
+ VSL_stats->dir_dns_cache_full++;
+ vdi_dns_pop_cache(vs, NULL);
+ }
+ CHECK_OBJ_NOTNULL(new, VDI_DNSDIR_MAGIC);
+ assert(new->hostname != 0);
+ new->ttl = sp->t_req + vs->ttl;
+ VTAILQ_INSERT_HEAD(&vs->cachelist, new, list);
+ vs->ncachelist++;
+}
+
+/* Add an item to the dns cache.
+ * XXX: Might want to factor the getaddrinfo() out of the lock and do the
+ * cache_has() afterwards to do multiple dns lookups in parallel...
+ */
+static int
+vdi_dns_cache_add(const struct sess *sp,
+ struct vdi_dns *vs,
+ const char *hostname,
+ struct director **backend)
+{
+ int error, i, host = 0;
+ struct addrinfo *res0, *res, hint;
+ struct vdi_dns_hostgroup *new;
+ /* Due to possible race while upgrading the lock, we have to
+ * recheck if the result is already looked up. The overhead for
+ * this is insignificant unless dns isn't cached properly (all
+ * unique names or something equally troublesome).
+ */
+
+ if (vdi_dns_cache_has(sp, vs, hostname, backend, 1))
+ return 1;
+
+ memset(&hint, 0, sizeof hint);
+ hint.ai_family = PF_UNSPEC;
+ hint.ai_socktype = SOCK_STREAM;
+
+ ALLOC_OBJ(new, VDI_DNSDIR_MAGIC);
+ XXXAN(new);
+ new->hostname = calloc(sizeof(char), strlen(hostname)+1);
+ XXXAN(new->hostname);
+ strcpy(new->hostname, hostname);
+
+ error = getaddrinfo(hostname, "80", &hint, &res0);
+ VSL_stats->dir_dns_lookups++;
+ if (error) {
+ vdi_dns_cache_list_add(sp, vs, new);
+ VSL_stats->dir_dns_failed++;
+ return 0;
+ }
+
+ for (res = res0; res; res = res->ai_next) {
+ if (res->ai_family != PF_INET && res->ai_family != PF_INET6)
+ continue;
+
+ for (i = 0; i < vs->nhosts; i++) {
+ if (vdi_dns_comp_addrinfo(vs->hosts[i],
+ res->ai_addr, res->ai_addrlen)) {
+ new->hosts[host] = vs->hosts[i];
+ CHECK_OBJ_NOTNULL(new->hosts[host],
+ DIRECTOR_MAGIC);
+ host++;
+ }
+ }
+ }
+ freeaddrinfo(res0);
+
+ new->nhosts = host;
+ vdi_dns_cache_list_add(sp, vs, new);
+ *backend = vdi_dns_pick_host(sp, new);
+ return 1;
+}
+
+/* Walk through the cached lookups looking for the relevant host, add one
+ * if it isn't already cached.
+ *
+ * Returns a backend or NULL.
+ */
+static struct director *
+vdi_dns_walk_cache(const struct sess *sp,
+ struct vdi_dns *vs,
+ const char *hostname)
+{
+ struct director *backend = NULL;
+ int ret;
+ AZ(pthread_rwlock_rdlock(&vs->rwlock));
+ ret = vdi_dns_cache_has(sp, vs, hostname, &backend, 0);
+ AZ(pthread_rwlock_unlock(&vs->rwlock));
+ if (!ret) {
+ AZ(pthread_rwlock_wrlock(&vs->rwlock));
+ ret = vdi_dns_cache_add(sp, vs, hostname, &backend);
+ AZ(pthread_rwlock_unlock(&vs->rwlock));
+ } else
+ VSL_stats->dir_dns_hit++;
+
+	/* Blank backend == cached a failure, so to speak */
+ if (backend != NULL)
+ CHECK_OBJ_NOTNULL(backend, DIRECTOR_MAGIC);
+ return backend;
+}
+
+/* Parses the Host:-header and heads out to find a backend.
+ */
+static struct director *
+vdi_dns_find_backend(const struct sess *sp, struct vdi_dns *vs)
+{
+ struct director *ret;
+ struct http *hp;
+ char *p;
+ char hostname[NI_MAXHOST];
+ int i;
+
+	/* bereq is only present after recv et al., otherwise use req (i.e.
+ * use req for health checks in vcl_recv and such).
+ */
+ if (sp->wrk->bereq)
+ hp = sp->wrk->bereq;
+ else
+ hp = sp->http;
+
+
+ CHECK_OBJ_NOTNULL(hp, HTTP_MAGIC);
+ if (http_GetHdr(hp, H_Host, &p) == 0)
+ return (NULL);
+
+ /* We need a working copy since it's going to be modified */
+ strncpy(hostname, p, sizeof(hostname));
+
+ /* remove port-portion of the Host-header, if present. */
+ for (i = 0; i < strlen(hostname); i++) {
+ if (hostname[i] == ':') {
+ hostname[i] = '\0';
+ break;
+ }
+ }
+
+ if (vs->suffix)
+ strncat(hostname, vs->suffix, sizeof(hostname) - strlen(hostname));
+
+ ret = vdi_dns_walk_cache(sp, vs, hostname);
+ return ret;
+}
+
+static struct vbe_conn *
+vdi_dns_getfd(const struct director *director, struct sess *sp)
+{
+ struct vdi_dns *vs;
+ struct director *dir;
+ struct vbe_conn *vbe;
+
+ CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(director, DIRECTOR_MAGIC);
+ CAST_OBJ_NOTNULL(vs, director->priv, VDI_DNS_MAGIC);
+
+ dir = vdi_dns_find_backend(sp, vs);
+ if (!dir || !VBE_Healthy_sp(sp, dir))
+ return (NULL);
+
+ vbe = VBE_GetFd(dir, sp);
+ return (vbe);
+}
+
+static unsigned
+vdi_dns_healthy(double now, const struct director *dir, uintptr_t target)
+{
+ /* XXX: Fooling -Werror for a bit until it's actually implemented.
+ */
+ if (now || dir || target)
+ return 1;
+ else
+ return 1;
+ return 1;
+ /*
+ struct vdi_dns *vs;
+ struct director *dir;
+ int i;
+
+ CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
+ CHECK_OBJ_NOTNULL(sp->director, DIRECTOR_MAGIC);
+ CAST_OBJ_NOTNULL(vs, sp->director->priv, VDI_DNS_MAGIC);
+
+ dir = vdi_dns_find_backend(sp, vs);
+
+ if (dir)
+ return 1;
+ return 0;
+ */
+}
+
+/*lint -e{818} not const-able */
+static void
+vdi_dns_fini(struct director *d)
+{
+ struct vdi_dns *vs;
+
+ CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
+ CAST_OBJ_NOTNULL(vs, d->priv, VDI_DNS_MAGIC);
+
+ free(vs->hosts);
+ free(vs->dir.vcl_name);
+ vs->dir.magic = 0;
+ /* FIXME: Free the cache */
+ AZ(pthread_rwlock_destroy(&vs->rwlock));
+ FREE_OBJ(vs);
+}
+
+void
+VRT_init_dir_dns(struct cli *cli, struct director **bp, int idx,
+ const void *priv)
+{
+ const struct vrt_dir_dns *t;
+ struct vdi_dns *vs;
+ const struct vrt_dir_dns_entry *te;
+ int i;
+
+ ASSERT_CLI();
+ (void)cli;
+ t = priv;
+ ALLOC_OBJ(vs, VDI_DNS_MAGIC);
+ XXXAN(vs);
+ vs->hosts = calloc(sizeof(struct director *), t->nmember);
+ XXXAN(vs->hosts);
+
+ vs->dir.magic = DIRECTOR_MAGIC;
+ vs->dir.priv = vs;
+ vs->dir.name = "dns";
+ REPLACE(vs->dir.vcl_name, t->name);
+ vs->dir.getfd = vdi_dns_getfd;
+ vs->dir.fini = vdi_dns_fini;
+ vs->dir.healthy = vdi_dns_healthy;
+
+ vs->suffix = t->suffix;
+ vs->ttl = t->ttl;
+
+ te = t->members;
+ for (i = 0; i < t->nmember; i++, te++)
+ vs->hosts[i] = bp[te->host];
+ vs->nhosts = t->nmember;
+ vs->ttl = t->ttl;
+ VTAILQ_INIT(&vs->cachelist);
+ AZ(pthread_rwlock_init(&vs->rwlock, NULL));
+ bp[idx] = &vs->dir;
+}
Added: branches/2.1/varnish-cache/bin/varnishtest/tests/v00029.vtc
===================================================================
--- branches/2.1/varnish-cache/bin/varnishtest/tests/v00029.vtc (rev 0)
+++ branches/2.1/varnish-cache/bin/varnishtest/tests/v00029.vtc 2010-10-20 13:44:17 UTC (rev 5443)
@@ -0,0 +1,45 @@
+# $Id: v00028.vtc 5125 2010-08-25 08:57:04Z phk $
+
+test "DNS director"
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+varnish v1 -vcl+backend {
+ director d1 dns {
+ { .backend = s1; }
+ }
+
+ sub vcl_recv {
+ set req.backend = d1;
+ return (pass);
+ }
+} -start
+
+
+client c1 {
+ txreq -hdr "Host: localhost"
+ rxresp
+ expect resp.status == 200
+
+ txreq -hdr "Host: .......coco-x-zamzam-i-cant-bother-making-it-random"
+ rxresp
+ expect resp.status == 503
+} -run
+
+varnish v2 -vcl {
+
+ director directorname dns {
+ .list = {
+ .host_header = "www.example.com";
+ .port = "80";
+ .connect_timeout = 0.4s;
+ "192.168.15.0"/24;
+ "192.168.16.128"/25;
+ }
+ .ttl = 5m;
+ .suffix = "internal.example.net";
+ }
+}
Added: branches/2.1/varnish-cache/bin/varnishtest/tests/v00030.vtc
===================================================================
--- branches/2.1/varnish-cache/bin/varnishtest/tests/v00030.vtc (rev 0)
+++ branches/2.1/varnish-cache/bin/varnishtest/tests/v00030.vtc 2010-10-20 13:44:17 UTC (rev 5443)
@@ -0,0 +1,53 @@
+# $Id: v00028.vtc 5125 2010-08-25 08:57:04Z phk $
+
+test "DNS director bad VCL tests"
+
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ 192.168.15.0/24;
+ }
+ }
+}
+
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ .host_header = "www.example.com";
+ .port = "80";
+ .connect_timeout = 0.4s;
+ }
+ }
+}
+
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ .host_hdr = "www.example.com";
+ "192.168.16.128"/25;
+ }
+ }
+}
+
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ .port = 80;
+ "192.168.15.0"/24;
+ }
+ }
+}
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ "192.168.15.0"/33;
+ }
+ }
+}
+varnish v1 -badvcl {
+ director directorname dns {
+ .list = {
+ "192.168.16.255"/24;
+ }
+ }
+}
Modified: branches/2.1/varnish-cache/configure.ac
===================================================================
--- branches/2.1/varnish-cache/configure.ac 2010-10-20 13:37:02 UTC (rev 5442)
+++ branches/2.1/varnish-cache/configure.ac 2010-10-20 13:44:17 UTC (rev 5443)
@@ -5,7 +5,7 @@
Copyright (c) 2006-2010 Redpill Linpro AS
Copyright (c) 2010 Varnish Software AS])
AC_REVISION([$Id$])
-AC_INIT([Varnish], [2.1.3], [varnish-dev at varnish-cache.org])
+AC_INIT([Varnish], [2.1.4], [varnish-dev at varnish-cache.org])
AC_CONFIG_SRCDIR(include/varnishapi.h)
AM_CONFIG_HEADER(config.h)
Added: branches/2.1/varnish-cache/lib/libvcl/vcc_dir_dns.c
===================================================================
--- branches/2.1/varnish-cache/lib/libvcl/vcc_dir_dns.c (rev 0)
+++ branches/2.1/varnish-cache/lib/libvcl/vcc_dir_dns.c 2010-10-20 13:44:17 UTC (rev 5443)
@@ -0,0 +1,360 @@
+/*-
+ * Copyright (c) 2009 Redpill Linpro AS
+ * Copyright (c) 2010 Varnish Software AS
+ * All rights reserved.
+ *
+ * Author: Kristian Lyngstol <kristian at bohemians.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "svnid.h"
+SVNID("$Id$")
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <limits.h>
+
+#include "vsb.h"
+
+#include "vcc_priv.h"
+#include "vcc_compile.h"
+#include "libvarnish.h"
+
+/*--------------------------------------------------------------------
+ * Parse directors
+ */
+
+
+static struct tokenlist_dir_backend_defaults {
+ char *port;
+ char *hostheader;
+ double connect_timeout;
+ double first_byte_timeout;
+ double between_bytes_timeout;
+ unsigned max_connections;
+ unsigned saint;
+} b_defaults;
+
+static void
+vcc_dir_initialize_defaults(void)
+{
+ b_defaults.port = NULL;
+ b_defaults.hostheader = NULL;
+ b_defaults.connect_timeout = -1.0;
+ b_defaults.first_byte_timeout = -1.0;
+ b_defaults.between_bytes_timeout = -1.0;
+ b_defaults.max_connections = UINT_MAX;
+ b_defaults.saint = UINT_MAX;
+}
+
+static const struct token *dns_first;
+static void
+print_backend(struct tokenlist *tl,
+ int serial,
+ const uint8_t *ip)
+{
+ char vgcname[BUFSIZ];
+ char strip[16];
+ struct token tmptok;
+ struct vsb *vsb;
+
+ bprintf(strip, "%u.%u.%u.%u", ip[3], ip[2], ip[1], ip[0]);
+ tmptok.dec = strip;
+ bprintf(vgcname, "%.*s_%d", PF(tl->t_dir), serial);
+ vsb = vsb_newauto();
+ AN(vsb);
+ tl->fb = vsb;
+ Fc(tl, 0, "\t{ .host = VGC_backend_%s },\n",vgcname);
+ Fh(tl, 1, "\n#define VGC_backend_%s %d\n", vgcname, serial);
+
+ Fb(tl, 0, "\nstatic const struct vrt_backend vgc_dir_priv_%s = {\n",
+ vgcname);
+
+ Fb(tl, 0, "\t.vcl_name = \"%.*s", PF(tl->t_dir));
+ if (serial >= 0)
+ Fb(tl, 0, "[%d]", serial);
+ Fb(tl, 0, "\",\n");
+ Emit_Sockaddr(tl, &tmptok, b_defaults.port);
+ vcc_EmitBeIdent(tl, tl->fb, serial, dns_first , tl->t);
+
+ Fb(tl, 0, "\t.hosthdr = \"");
+ if (b_defaults.hostheader != NULL)
+ Fb(tl,0, b_defaults.hostheader);
+ else
+ Fb(tl,0, strip);
+ Fb(tl, 0, "\",\n");
+
+ Fb(tl, 0, "\t.saintmode_threshold = %d,\n",b_defaults.saint);
+#define FB_TIMEOUT(type) do { \
+ if (b_defaults.type != -1.0) \
+ Fb(tl, 0, "\t.%s = %g,\n",#type,b_defaults.type); \
+ } while (0)
+ FB_TIMEOUT(connect_timeout);
+ FB_TIMEOUT(first_byte_timeout);
+ FB_TIMEOUT(between_bytes_timeout);
+
+ Fb(tl, 0, "};\n");
+ tl->fb = NULL;
+ vsb_finish(vsb);
+ Fh(tl, 0, "%s", vsb_data(vsb));
+ vsb_delete(vsb);
+ Fi(tl, 0, "\tVRT_init_dir(cli, VCL_conf.director, \"simple\",\n"
+ "\t VGC_backend_%s, &vgc_dir_priv_%s);\n", vgcname, vgcname);
+ Ff(tl, 0, "\tVRT_fini_dir(cli, VGCDIR(%s));\n", vgcname);
+ tl->ndirector++;
+}
+
+/*
+ * Output backends for all IPs in the range supplied by
+ * "a[0].a[1].a[2].a[3]/inmask".
+ *
+ * XXX:
+ * This assumes that a uint32_t can be safely accessed as an array of 4
+ * uint8_ts.
+ */
+static void
+vcc_dir_dns_makebackend(struct tokenlist *tl,
+ int *serial,
+ const unsigned char a[],
+ int inmask)
+{
+ uint32_t ip4=0;
+ uint32_t ip4end;
+ uint32_t mask = UINT32_MAX << (32-inmask);
+
+ ip4 |= a[0] << 24;
+ ip4 |= a[1] << 16;
+ ip4 |= a[2] << 8;
+ ip4 |= a[3] ;
+
+ ip4end = ip4 | ~mask;
+ assert (ip4 == (ip4 & mask));
+
+/* printf("uip4: \t0x%.8X\na: \t0x", ip4,ip4);
+ for (int i=0;i<4;i++) printf("%.2X",a[i]);
+ printf("\nmask:\t0x%.8X\nend:\t0x%.8X\n", mask, ip4end);
+*/
+ while (ip4 <= ip4end) {
+ uint8_t *b;
+ b=(uint8_t *)&ip4;
+ (*serial)++;
+ print_backend(tl, *serial, b);
+ ip4++;
+ }
+}
+static void
+vcc_dir_dns_parse_backend_options(struct tokenlist *tl)
+{
+ struct fld_spec *fs;
+ struct token *t_field;
+ double t;
+ unsigned u;
+ vcc_dir_initialize_defaults();
+ fs = vcc_FldSpec(tl,
+ "?port",
+ "?host_header",
+ "?connect_timeout",
+ "?first_byte_timeout",
+ "?between_bytes_timeout",
+ "?max_connections",
+ "?saintmode_threshold",
+ NULL);
+ while (tl->t->tok != CSTR) {
+
+ vcc_IsField(tl, &t_field, fs);
+ ERRCHK(tl);
+ if (vcc_IdIs(t_field, "port")) {
+ ExpectErr(tl, CSTR);
+ assert(tl->t->dec != NULL);
+ b_defaults.port = strdup(tl->t->dec);
+ assert(b_defaults.port);
+ vcc_NextToken(tl);
+ SkipToken(tl, ';');
+ } else if (vcc_IdIs(t_field, "host_header")) {
+ ExpectErr(tl, CSTR);
+ assert(tl->t->dec != NULL);
+ b_defaults.hostheader = strdup(tl->t->dec);
+ assert(b_defaults.hostheader);
+ vcc_NextToken(tl);
+ SkipToken(tl, ';');
+ } else if (vcc_IdIs(t_field, "connect_timeout")) {
+ vcc_TimeVal(tl, &t);
+ ERRCHK(tl);
+ b_defaults.connect_timeout = t;
+ SkipToken(tl, ';');
+ } else if (vcc_IdIs(t_field, "first_byte_timeout")) {
+ vcc_TimeVal(tl, &t);
+ ERRCHK(tl);
+ b_defaults.first_byte_timeout = t;
+ SkipToken(tl, ';');
+ } else if (vcc_IdIs(t_field, "between_bytes_timeout")) {
+ vcc_TimeVal(tl, &t);
+ ERRCHK(tl);
+ b_defaults.between_bytes_timeout = t;
+ SkipToken(tl, ';');
+ } else if (vcc_IdIs(t_field, "max_connections")) {
+ u = vcc_UintVal(tl);
+ ERRCHK(tl);
+ SkipToken(tl, ';');
+ b_defaults.max_connections = u;
+ } else if (vcc_IdIs(t_field, "saintmode_threshold")) {
+ u = vcc_UintVal(tl);
+ /* UINT_MAX == magic number to mark as unset, so
+ * not allowed here.
+ */
+ if (u == UINT_MAX) {
+ vsb_printf(tl->sb,
+ "Value outside allowed range: ");
+ vcc_ErrToken(tl, tl->t);
+ vsb_printf(tl->sb, " at\n");
+ vcc_ErrWhere(tl, tl->t);
+ }
+ ERRCHK(tl);
+ b_defaults.saint = u;
+ SkipToken(tl, ';');
+ } else {
+ ErrInternal(tl);
+ return;
+ }
+
+ }
+}
+
+/* Parse a list of backends with optional /mask notation, then print out
+ * all relevant backends.
+ */
+static void
+vcc_dir_dns_parse_list(struct tokenlist *tl, int *serial)
+{
+ unsigned char a[4],mask;
+ int ret;
+ ERRCHK(tl);
+ SkipToken(tl, '{');
+ if (tl->t->tok != CSTR)
+ vcc_dir_dns_parse_backend_options(tl);
+ while (tl->t->tok == CSTR) {
+ mask = 32;
+ ret = sscanf(tl->t->dec, "%hhu.%hhu.%hhu.%hhu",
+ &a[0], &a[1], &a[2], &a[3]);
+ assert(ret == 4);
+ vcc_NextToken(tl);
+ if (tl->t->tok == '/') {
+ vcc_NextToken(tl);
+ mask = vcc_UintVal(tl);
+ ERRCHK(tl);
+ }
+ vcc_dir_dns_makebackend(tl,serial,a,mask);
+ SkipToken(tl,';');
+ }
+ ExpectErr(tl, '}');
+}
+
+void
+vcc_ParseDnsDirector(struct tokenlist *tl)
+{
+ struct token *t_field, *t_be, *t_suffix = NULL;
+ double ttl = 60.0;
+ int nelem = 0;
+ struct fld_spec *fs;
+ const char *first;
+ char *p;
+ dns_first = tl->t;
+ tl->fb = tl->fc;
+ fs = vcc_FldSpec(tl, "!backend", "?ttl", "?suffix","?list", NULL);
+
+ Fc(tl, 0, "\nstatic const struct vrt_dir_dns_entry "
+ "vddnse_%.*s[] = {\n", PF(tl->t_dir));
+
+ for (; tl->t->tok != '}'; ) { /* List of members */
+ if (tl->t->tok == '{') {
+ nelem++;
+ first = "";
+ t_be = tl->t;
+ vcc_ResetFldSpec(fs);
+
+ ExpectErr(tl, '{');
+ vcc_NextToken(tl);
+ Fc(tl, 0, "\t{");
+
+ while (tl->t->tok != '}') { /* Member fields */
+ vcc_IsField(tl, &t_field, fs);
+ ERRCHK(tl);
+ if (vcc_IdIs(t_field, "backend")) {
+ vcc_ParseBackendHost(tl, nelem, &p);
+ ERRCHK(tl);
+ AN(p);
+ Fc(tl, 0, "%s .host = VGC_backend_%s",
+ first, p);
+ } else {
+ ErrInternal(tl);
+ }
+ first = ", ";
+ }
+ vcc_FieldsOk(tl, fs);
+ if (tl->err) {
+ vsb_printf(tl->sb, "\nIn member host"
+ " specification starting at:\n");
+ vcc_ErrWhere(tl, t_be);
+ return;
+ }
+ Fc(tl, 0, " },\n");
+ } else {
+ vcc_IsField(tl, &t_field, fs);
+ ERRCHK(tl);
+ if (vcc_IdIs(t_field, "suffix")) {
+ ExpectErr(tl, CSTR);
+ t_suffix = tl->t;
+ vcc_NextToken(tl);
+ ExpectErr(tl, ';');
+ } else if (vcc_IdIs(t_field, "ttl")) {
+ vcc_RTimeVal(tl, &ttl);
+ ExpectErr(tl, ';');
+ } else if (vcc_IdIs(t_field, "list")) {
+ vcc_dir_dns_parse_list(tl,&nelem);
+ }
+ }
+ vcc_NextToken(tl);
+ }
+ Fc(tl, 0, "};\n");
+ Fc(tl, 0, "\nstatic const struct vrt_dir_dns vgc_dir_priv_%.*s = {\n",
+ PF(tl->t_dir));
+ Fc(tl, 0, "\t.name = \"%.*s\",\n", PF(tl->t_dir));
+ Fc(tl, 0, "\t.nmember = %d,\n", nelem);
+ Fc(tl, 0, "\t.members = vddnse_%.*s,\n", PF(tl->t_dir));
+ Fc(tl, 0, "\t.suffix = ");
+ if (t_suffix)
+ Fc(tl, 0, "%.*s", PF(t_suffix));
+ else
+ Fc(tl, 0, "\"\"");
+ Fc(tl, 0, ",\n");
+ Fc(tl, 0, "\t.ttl = %f", ttl);
+ Fc(tl, 0, ",\n");
+ Fc(tl, 0, "};\n");
+ Ff(tl, 0, "\tVRT_fini_dir(cli, VGCDIR(_%.*s));\n", PF(tl->t_dir));
+}
More information about the varnish-commit
mailing list