Is it possible with Varnish, and how to do it?
Швабауэр Павел
shvabauer at arsenal-d.uz
Fri Mar 18 15:15:42 CET 2016
Good day team,
I have been using Varnish in our projects for a long time, so thank you very
much for this great technology!
Let me explain one thing I have not been able to work out for two weeks now.
Here is the task. We use a form of per-client link security, so every client
gets its own generated links. For example:
client1:
/2ca6d77ff25927870b135c389c4692ef/book/a7a5b1ce35c454dbc4abf6425739e9cd890cd68c640d3747947bc502d9da9e2a5aa93c3069f29630/thing1.txt
client2:
/2ca6d77ff25927870b135c389c4692ef/book/ea0c89a5895321aa7af058492d7ba33e0395bc04efc490e4ad1d4c29f0788446b9d0fee67a606s4t/thing1.txt
client3:
/2ca6d77ff25927870b135c389c4692ef/book/ea0c89a5895320d96d798f0ec4dc13b69f6c342d2f5cf86d09374d141c9119515003b1abb2351d4f/thing1.txt
Based on these raw URLs we cannot build a useful cache, so we have to
normalize them and cache the object under a key such as:
/2ca6d77ff25927870b135c389c4692ef/thing1.txt
However, the backend only returns the proper response for a URL like:
/magazine/ea0c89a5895320d96d798f0ec4dc13b69f6c342d2f5cf86d09374d141c9119515003b1abb2351d4f/thing1.txt
So, how can this be achieved with Varnish: look up objects by a custom key,
fetch them from the backend by a custom URL, and store them in the cache
under that custom key?
I have already set up the URL rewriting and it works: Varnish fetches the
content and sends it to the client, but it does not cache it at all.
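To make the intent concrete, here is a rough sketch of what I think the flow
should look like (my assumption is that the key normalization belongs in
vcl_hash and the backend URL rewrite in vcl_backend_fetch; the regexes below
are illustrative only):

sub vcl_hash {
    # Cache key: strip /book/<token> so all clients share one object
    if (req.url ~ "/book/") {
        hash_data(regsub(req.url, "^(.*)/book/[^/]+/([^/]+)$", "\1/\2"));
    } else {
        hash_data(req.url);
    }
    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }
    return (lookup);
}

sub vcl_backend_fetch {
    # Backend URL: keep the client token, but go through /magazine/
    if (bereq.url ~ "/book/") {
        set bereq.url = regsub(bereq.url, "^.*/book/([^/]+)/([^/]+)$", "/magazine/\1/\2");
    }
    return (fetch);
}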
I also need a way to hold other clients requesting the same object until the
first request has fetched it, i.e. some kind of queue.
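As far as I understand, Varnish already queues concurrent requests for the
same object on its waiting list, but only when they go through a cache
lookup; passed requests each go to the backend on their own. So my assumption
is that the /book/ requests must not be passed in vcl_recv, roughly:

sub vcl_recv {
    # Only looked-up requests are queued on the waiting list while the
    # first miss is being fetched; passed requests are never coalesced.
    if (req.method == "GET" && req.url ~ "/book/") {
        return (hash);
    }
}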
Here is the current config I am using (trimmed to the parts that matter):
sub vcl_recv {
    set req.backend_hint = vdir.backend(); # send all traffic to the vdir director
    set req.url = std.querysort(req.url);
    unset req.http.cookie;
    if (req.method == "GET" && (req.url ~ "book")) {
        std.log("we are passing it");
        return (pass);
    }
    std.log("we are looking for the cache");
    return (hash);
}
sub vcl_pipe {
    #std.log("piped url:::" + bereq.url);
    #if (bereq.url ~ "/origin/") {
    #    set bereq.url = regsub(req.url, "(.*)/(.*)/(.*)/(.*)$", "\1/\4");
    #    std.log("piped::" + bereq.url);
    #}
    set bereq.http.Connection = "Close";
    return (pipe);
}
# The data on which the hashing will take place
sub vcl_hash {
    ###### here we should modify the url to try to return it from cache
    if (req.url ~ "/book/") {
        set req.http.cacheurl = regsub(req.url, "(.*)/(.*)/(.*)/(.*)$", "\1/\4");
        std.log("hash rewrite was:" + req.url);
        std.log("become ::: " + req.http.cacheurl);
    } else {
        set req.http.cacheurl = req.url;
    }
    hash_data(req.http.cacheurl);
    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }
}
sub vcl_backend_fetch {
    if (bereq.url ~ "/book/") {
        set bereq.url = regsub(bereq.url, "(.*)/(.*)/(.*)/(.*)$", "/magazine/\3/\4");
        std.log("backend fetch::" + bereq.url);
    }
    return (fetch);
}
sub vcl_hit {
    # Called when a cache lookup is successful.
    if (obj.ttl >= 0s) {
        # A pure unadulterated hit, deliver it
        return (deliver);
    }
    # if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
    #     return (deliver);
    # } else {
    #     return (fetch);
    # }
    # We have no fresh fish. Let's look at the stale ones.
    if (std.healthy(req.backend_hint)) {
        # Backend is healthy. Limit age to 10s.
        if (obj.ttl + 10s > 0s) {
            #set req.http.grace = "normal(limited)";
            return (deliver);
        } else {
            # No candidate for grace. Fetch a fresh object.
            return (fetch);
        }
    } else {
        # Backend is sick - use full grace.
        if (obj.ttl + obj.grace > 0s) {
            #set req.http.grace = "full";
            return (deliver);
        } else {
            # No graced object.
            return (fetch);
        }
    }
    # fetch & deliver once we get the result
    return (fetch); # Dead code, keep as a safeguard
}
sub vcl_miss {
    std.log("vcl_miss " + req.url);
    return (fetch);
}
# Handle the HTTP response coming from our backend
sub vcl_backend_response {
    set beresp.grace = 20m;
    # Called after the response headers have been successfully retrieved from the backend.
    unset beresp.http.set-cookie;
    # Force a 20-minute TTL
    set beresp.ttl = 1200s;
    std.log("response url:::" + bereq.url);
    ###### here we should rewrite the url to send it to wowza then
    if (bereq.url ~ "/live/") {
        set bereq.url = regsub(bereq.url, "(.*)/(.*)/(.*)/(.*)$", "\1/\4");
    }
    # Allow stale content (the 20 minutes of grace set above), in case the backend goes down.
    return (deliver);
}
sub vcl_deliver {
    if (req.url ~ "/book/") {
        set req.url = regsub(req.url, "(.*)/(.*)/(.*)/(.*)$", "\1/\4");
        std.log("delivering :: " + req.url);
    }
    # Add a debug header to see if it's a HIT/MISS and the number of hits;
    # disable when not needed.
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }
    # Please note that obj.hits behaviour changed in 4.0: it now counts per
    # objecthead, not per object, and obj.hits may not be reset in some cases
    # where bans are in use. See bug 1492 for details.
    # So take hits with a grain of salt.
    set resp.http.X-Cache-Hits = obj.hits;
    # Remove some headers: PHP version
    unset resp.http.X-Powered-By;
    # Remove some headers: Apache version & OS
    unset resp.http.Server;
    unset resp.http.X-Drupal-Cache;
    unset resp.http.X-Varnish;
    unset resp.http.Via;
    unset resp.http.Link;
    unset resp.http.X-Generator;
    return (deliver);
}
sub vcl_purge {
    # Only handle actual PURGE HTTP methods, everything else is discarded
    if (req.method != "PURGE") {
        # restart request
        set req.http.X-Purge = "Yes";
        return (restart);
    }
}
sub vcl_synth {
    if (resp.status == 720) {
        # We use the special status 720 to force a 301 (permanent) redirect.
        # To use this, call the following from anywhere in vcl_recv:
        #     return (synth(720, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 301;
        return (deliver);
    } elseif (resp.status == 721) {
        # And we use status 721 to force a 302 (temporary) redirect.
        # To use this, call the following from anywhere in vcl_recv:
        #     return (synth(721, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 302;
        return (deliver);
    }
    return (deliver);
}
sub vcl_fini {
    # Called when the VCL is discarded, only after all requests have exited the VCL.
    # Typically used to clean up VMODs.
    return (ok);
}
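For reference, the 301/302 helpers in vcl_synth above would be triggered from
vcl_recv along these lines (the old and new paths are just placeholders):

sub vcl_recv {
    # Hypothetical example: permanently redirect an old path
    if (req.url == "/old.html") {
        return (synth(720, "http://host/new.html"));
    }
}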