Created
December 31, 2011 05:27
-
-
Save jeremyjbowers/1542992 to your computer and use it in GitHub Desktop.
A complex Varnish configuration file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/*
 * Multi-server director.
 *
 * Instead of a single backend, set up a "random" director that
 * distributes each incoming request across three application
 * servers. With equal .weight values, each server receives
 * roughly one third of the traffic.
 */
director backend random {
    # Try up to 5 times to pick a usable backend before failing
    # the request.
    .retries = 5;
    {
        .backend = {
            .host = "a.website.com";
            .port = "8000";
        }
        # Equal weight: ~1/3 of requests land here.
        .weight = 1;
    }
    {
        .backend = {
            .host = "b.website.com";
            .port = "8000";
        }
        # Equal weight: ~1/3 of requests land here.
        .weight = 1;
    }
    {
        .backend = {
            .host = "c.website.com";
            .port = "8000";
        }
        # Equal weight: ~1/3 of requests land here.
        .weight = 1;
    }
}
/*
 * Access control list for the special PURGE and PRIME request
 * methods handled in vcl_recv below. We don't want just anyone
 * to flush or refresh the cache, so limit these request types
 * to clients on the local network.
 */
acl purge_prime {
    "127.0.0.1";       # localhost
    "10.0.0.0"/8;      # RFC 1918 private range
    "172.16.0.0"/12;   # RFC 1918 private range
    # NOTE(review): 192.168.0.0/16 (the third RFC 1918 private
    # range) is not listed -- confirm that omission is intentional.
}
/*
 * vcl_recv: entry point for every incoming client request.
 *
 * Responsibilities:
 *   - reject PURGE/PRIME requests from clients outside the
 *     purge_prime ACL (allowed ones fall through to lookup so
 *     vcl_miss/vcl_hit can act on them);
 *   - strip jQuery cache-busting URL parameters;
 *   - route the request to the "backend" director;
 *   - enable 5 minutes of grace mode;
 *   - hand the request to the cache lookup.
 */
sub vcl_recv {
    # First, check to see if this is a special PURGE request.
    # If it is, and the client isn't in our special access
    # control list above, send them a cheeky message.
    #
    # BUGFIX: VCL's error statement takes a constant string; the
    # original concatenated client.ip into the message with "+",
    # which does not compile. Use a constant reason string instead
    # (the client address is available in varnishlog if needed).
    if (req.request == "PURGE") {
        if (!client.ip ~ purge_prime) {
            error 405 "No purge for you.";
        }
    }
    # Same for PRIME requests.
    if (req.request == "PRIME") {
        if (!client.ip ~ purge_prime) {
            error 405 "No priming for you.";
        }
    }
    # You know, people really like to use jQuery.
    # The jQuery $.ajax() and similar functions will pull JSON from an API
    # behind your Varnish server. Sadly, browsers will cache the JSON
    # response basically forever unless you use the cache: false declaration.
    #
    # In practice, this just appends a new URL parameter to the request, like
    # &_=1234567890, but it changes with each request. This sucks for caching.
    #
    # This regsuball() call performs a regex substitution. The regex matches
    # URL parameters that start with ? or &, follow with _= and then
    # contain up to 25 other characters EXCEPT for an ampersand.
    # An ampersand means that we're starting a second URL parameter.
    #
    # By stripping this parameter out, we can serve a cached response for
    # the ever-changing URL since all but the &_= parameter stay identical.
    set req.url = regsuball(req.url, "[?&]_=[^&]{1,25}", "");
    # Route the request to the "backend" director defined above.
    set req.backend = backend;
    # Grace mode is magical. When you have a request for an object that is in
    # the cache but has expired, Grace mode will continue to serve the old
    # expired cache object for everyone except for the very first client
    # to ask for it. Once the updated response is available for that first
    # client, all of the other clients will then get the updated response.
    #
    # By setting grace to 5 minutes, we tell Varnish we are willing to accept
    # an outdated object up to 5 minutes past its expiration to cover
    # us while we fetch an updated response. Works great for slow pages.
    set req.grace = 5m;
    # Pass the request along to the cache lookup. Note this sends
    # ALL request methods (including POST) to lookup; the default
    # vcl_recv behavior of passing non-GET/HEAD methods is bypassed.
    return(lookup);
}
/*
 * vcl_miss: runs when a lookup does not find the object in cache.
 */
sub vcl_miss {
    # If someone sends along a PURGE request from our special list of
    # acceptable addresses (enforced in vcl_recv), we need to ban a
    # stack of URLs.
    #
    # PURGE will look for a regular expression of matching URLs, something
    # like this: curl -X PURGE "http://my.site/ban/these/urls/.*"
    #
    # The request above will ban all of the following URLs from cache:
    # http://my.site/ban/these/urls/like/this/url/
    # http://my.site/ban/these/urls/like/that/url/
    # http://my.site/ban/these/urls/even/this/?monkey=hammer&foo=baz
    #
    # This code is located in vcl_miss because you shouldn't have a page
    # that matches your regex. If you think you might, this code can also
    # go in the vcl_hit subroutine below.
    if (req.request == "PURGE") {
        # ban_url() treats req.url as a regex and invalidates every
        # cached object whose URL matches it.
        ban_url(req.url);
        # Stop here with a synthetic 200 rather than fetching anything.
        error 200 "Miss and banned, sire.";
    }
    # Pass this cache-missed request along to be fetched from the backend.
    return(fetch);
}
/*
 * vcl_hit: runs when a lookup finds the object in cache.
 */
sub vcl_hit {
    # A PRIME request is for a single URL, and it will usually be delivered
    # by two consecutive requests, like this:
    #
    # curl -X PRIME "http://my.site/election/dashboard/"
    # curl -X GET "http://my.site/election/dashboard/"
    #
    # A PRIME request won't just ban the object from the cache; it's much
    # sneakier than that. PRIME will set the cache object to expire 1 second
    # into the future. This way, Varnish will cover your following GET request
    # by sending the now-stale cache object to every other client (via grace
    # mode) while your GET request loads the page from the backend.
    #
    # Real-life story: If you have a big, slow page that you need to occasionally
    # refresh as new data becomes available, you can use this method to expire
    # and then reload the cache object without anyone on the internet ever
    # having to wait for your big, slow page to load. Thanks to Jeff Larson and
    # Chris Groskopf for the idea.
    #
    # (The ACL check for PRIME already happened in vcl_recv, so any PRIME
    # request reaching this point is authorized.)
    if (req.request == "PRIME") {
        # Expire the cached object one second from now.
        set obj.ttl = 1s;
        # Answer the PRIME request itself with a synthetic 200.
        error 200 "USDA PRIME.";
    }
    # Pass the request on to the deliver subroutine.
    return(deliver);
}
/*
 * vcl_fetch: runs after a response has been fetched from the backend,
 * before it is inserted into the cache.
 */
sub vcl_fetch {
    # Debug marker so we can see from the response headers that this
    # object went through our fetch logic.
    set beresp.http.X-Cacheable = "YES";
    # Defeat the Vary header so the backend cannot fragment the cache
    # into per-client variants of the same URL.
    unset beresp.http.Vary;
    # Cache this object for an hour...
    set beresp.ttl = 1h;
    # ...and keep it around for an extra 5 minutes past expiry so it
    # can be served during grace mode (see vcl_recv).
    set beresp.grace = 5m;
    # Send us along to the deliver subroutine.
    return(deliver);
}
/*
 * vcl_deliver: last stop before the response goes back to the client.
 *
 * Troubleshooting Varnish can be hard. It's much easier when the
 * response carries custom headers saying whether it was a cache hit,
 * how many hits the object has served, and which backend produced it.
 */
sub vcl_deliver {
    if (obj.hits == 0) {
        # Freshly fetched from the backend on this request.
        set resp.http.X-Cache = "MISS";
    } else {
        # Served from cache; report the hit count and origin backend.
        set resp.http.X-Cache = "HIT";
        set resp.http.X-Cache-Hits = obj.hits;
        set resp.http.X-Cache-Backend = req.backend;
    }
    # Have a little fun.
    set resp.http.X-Inception-Horn = "Brrrrrrnk.";
    # Return the response to the client.
    return(deliver);
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment