Skip to content

Instantly share code, notes, and snippets.

@alecmuffett
Created January 27, 2018 11:57
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save alecmuffett/888c34bbd8e026c2b3c3f4f5a87b3d4b to your computer and use it in GitHub Desktop.
Save alecmuffett/888c34bbd8e026c2b3c3f4f5a87b3d4b to your computer and use it in GitHub Desktop.
Example for Vess
set project bontchev
hardmap %NEW_ONION% bontchev.com
# -*- awk -*-
# eotk (c) 2017 Alec Muffett
# EMACS awk mode works quite well for nginx configs
# ---- BEGIN HARD/CLASSIC SWITCH ----
# *CLASSIC* configuration
# swap domain names for onions via targeted regular expressions...
# ---- END HARD/CLASSIC SWITCH ----
# logs and pids
pid /Users/alecm/src/eotk/projects.d/bontchev.d/nginx.pid;
error_log /Users/alecm/src/eotk/projects.d/bontchev.d/log.d/nginx-error.log error;
# TODO: notes for custom 403 error-handling pages:
# https://www.cyberciti.biz/faq/unix-linux-nginx-custom-error-403-page-configuration/
# https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page
# performance
worker_processes auto; # hardmap
worker_rlimit_nofile 256;
events {
worker_connections 256;
}
http {
# nginx fails without large enough buckets (sigh)
map_hash_bucket_size 128;
# dns for proxy (sigh)
resolver 8.8.8.8 valid=15s;
resolver_timeout 15s;
# we walk a line between keeping it small and flooding resources...
proxy_buffering on;
# for initial; impacts SSL header
proxy_buffer_size 8k;
# for rest of response
proxy_buffers 8 8k;
# how much can be busy sending to client?
proxy_busy_buffers_size 16k;
# where to stash oversize requests?
client_body_temp_path /tmp/nginx-body-bontchev;
client_max_body_size 4m;
# in case we want to start spooling responses locally
proxy_temp_path /tmp/nginx-proxy-bontchev;
proxy_max_temp_file_size 256m;
proxy_temp_file_write_size 8k;
# nginx caching static responses for 60 seconds
# - this is a lightweight cache to reduce "storms", hence the global
# approach of "cache everything for a small number of seconds"
# https://nginx.org/en/docs/http/ngx_http_proxy_module.html
proxy_cache_path /tmp/nginx-cache-bontchev levels=1:2 keys_zone=bontchev:256m;
proxy_cache bontchev;
proxy_cache_min_uses 1;
proxy_cache_revalidate on;
proxy_cache_use_stale timeout updating;
proxy_cache_valid any 60s; # "any" includes 404s, etc
# content-types to not cache
map $http_content_type $no_cache_content_type {
default 0;
}
# hosts not to cache
map $http_host $no_cache_host {
hostnames;
default 0;
}
# so, should we skip caching this stuff for some reason?
proxy_no_cache $no_cache_content_type $no_cache_host;
proxy_cache_bypass $no_cache_content_type $no_cache_host;
# logs
access_log /Users/alecm/src/eotk/projects.d/bontchev.d/log.d/nginx-access.log;
# global settings
server_tokens off;
# allow/deny (first wins)
allow "unix:";
deny all;
# rewrite these content types; text/html is implicit
subs_filter_types
application/javascript
application/json
application/x-javascript
text/css
text/javascript
text/xml
# no extra_subs_filter_types
;
#==================================================================
#------------------------------------------------------------------
# ---- BEGIN CLASSIC MODE CODE ----
# subs_filter: these patterns bear some explanation; the goal is to
# work regular expressions really hard in order to minimise the
# number of expressions which are used in the basic config, so the
# basic pattern is to capture zero/more "sub." in "//sub.foo.com"
# and interpolate that into "//sub.xxxxxxxx.onion"; so far?
# but it turns out that some JSON libraries like to "escape" the
# forward slashes in JSON content, leading to input like (literal)
# "http:\/\/sub.foo.com\/foo.html" - so you need to add the
# backslashes, but then you need to escape the backslashes, except
# they need double-escaping in the regexp because of string
# interpolation; hence 4x backslash -> 1x matched character
# likewise we use the "_RE2" form of the re-escaped domain name in
# order to coerce the regexp to match literal dots, not wildcards.
# there seems to be some sort of shortcut at play here; the trailing
# "\\b" also seems to work as "\b" however that would apparently
# break the double-escaping that is necessary/works everywhere else
# in subs_filter.
# also, regrettably, named capture groups appear not to work, we're
# fortunate that there appear not to be more than 9 capture groups
# by default, lest "$1" bleed into the subsequent digits of an onion
# address: $1234567abcdefghij.onion
# finally: some sites encode // with %-encoded "2F" in URIs...
# for bontchev.com -> uw5xpd6ygotmq2tz.onion anchored by // or \/\/
subs_filter
(/|\\\\/\\\\)/(([-0-9a-z]+\\.)+)?bontchev\\.com\\b
$1/$2uw5xpd6ygotmq2tz.onion
gir
;
# for bontchev.com -> uw5xpd6ygotmq2tz.onion anchored with hex-encoded slashes
subs_filter
%2F%2F(([-0-9a-z]+\\.)+)?bontchev\\.com\\b
%2F%2F$1uw5xpd6ygotmq2tz.onion
gir
;
# no foreignmap subs
# ---- END CLASSIC MODE CODE ----
#==================================================================
# o_to_d_lookup -> if cannot remap, return input. note: old versions
# of lua-plugin cannot cope with code like o_to_d_mappings[o[1]]
# because of `long bracket syntax`; the `[o[` freaks it out.
# See: https://github.com/openresty/lua-nginx-module/issues/748
init_by_lua_block {
-- helper functions for elsewhere
-- log helper for manual debugging: writes s to the nginx error log
slog = function (s)
  ngx.log(ngx.ERR, s)
end
-- true when string s ends with string x (byte-wise comparison)
has_suffix = function (s, x)
  return s:sub(-#x) == x
end
-- mapping onions to dns
o_to_d_mappings = {
  ["uw5xpd6ygotmq2tz.onion"] = "bontchev.com",
}
-- gsub callback: m[1] is the captured onion hostname; return its dns
-- equivalent when known, otherwise echo the input back unchanged
-- (see note above re: array syntax and old lua-plugin versions)
o_to_d_lookup = function (m)
  local onion = m[1]
  local dns = o_to_d_mappings[onion]
  if dns ~= nil then
    return dns
  end
  return onion
end
-- rewrite every 16-char (v2) onion address in i back to its dns name
-- via o_to_d_lookup; accepts a string or a table of strings (as nginx
-- presents multi-valued headers) and recurses over the latter
onion_to_dns = function (i)
  -- absent/empty values pass through untouched
  if i == nil or i == "" then
    return i
  end
  if type(i) == "table" then
    local mapped = {}
    for index, value in ipairs(i) do
      mapped[index] = onion_to_dns(value)
    end
    return mapped
  end
  -- counts/errors from gsub are deliberately discarded
  local rewritten = ngx.re.gsub(i, "\\b([a-z2-7]{16}\\.onion)\\b", o_to_d_lookup, "io")
  return rewritten
end
-- mapping dns to onions, for experimentation
d_to_o_mappings = {
  ["bontchev.com"] = "uw5xpd6ygotmq2tz.onion",
}
-- gsub callback: m[1] is the matched dns hostname; return its onion
-- equivalent when known, otherwise echo the input back unchanged
-- (see note above re: array syntax and old lua-plugin versions)
d_to_o_lookup = function (m)
  local dns = m[1]
  local onion = d_to_o_mappings[dns]
  if onion ~= nil then
    return onion
  end
  return dns
end
-- rewrite every occurrence of the project dns name in i to its onion
-- address; accepts a string or a table of strings and recurses over
-- the latter; "*" (wildcard, e.g. in ACAO headers) passes through
dns_to_onion = function (i)
  if i == nil or i == "" or i == "*" then
    return i
  end
  if type(i) == "table" then
    local mapped = {}
    for index, value in ipairs(i) do
      mapped[index] = dns_to_onion(value)
    end
    return mapped
  end
  -- extra parens truncate gsub's (result, count, errs) to one value
  return (ngx.re.gsub(i, "\\bbontchev\\.com\\b", "uw5xpd6ygotmq2tz.onion", "io"))
end
-- a note for future maintainers; if we were being strictly orthogonal then
-- the replacement with ONION_ADDRESS in much of this Lua block would have to
-- be double-escaped for potential backslashes, because double-quotes;
-- however this is not needed because DNS forbids backslash; the only code
-- where this becomes evident/necessary is here, with "_RE2":
-- rewrite regexp-escaped occurrences of the dns name ("bontchev\.com",
-- as found inside serialized regular expressions) into the equivalent
-- escaped onion form ("uw5xpd6ygotmq2tz\.onion").
-- brought into line with dns_to_onion (resolving the original TODO):
-- absent/empty inputs pass through, and tables of strings recurse,
-- so a nil or multi-valued input no longer raises in ngx.re.gsub.
dnsre_to_onionre = function (i)
  if i == nil or i == "" then
    return i
  end
  if (type(i) == "table") then
    local j, k, result
    result = {}
    for j, k in ipairs(i) do
      table.insert(result, dnsre_to_onionre(k))
    end
    return result
  end
  local num, errs
  i, num, errs = ngx.re.gsub(i, "\\bbontchev\\\\\\.com\\b", "uw5xpd6ygotmq2tz\\.onion", "io")
  return i
end
}
# filter the response headers en-route back to the user
header_filter_by_lua_block {
local k, v
-- ==================================================================
-- ---- BEGIN CLASSIC MODE CODE ----
-- is this javascript/json? if so, extra processing:
-- 1) set a processing flag to pick up in body_filter_by_lua_block
-- 2) invalidate content-length, because we will change it
-- NOTE(review): this is an exact string comparison, so values with
-- parameters (e.g. "application/json; charset=utf-8") will NOT match
-- and will skip extra processing -- confirm upstream never sends them
k = "Content-Type"
v = ngx.header[k]
if v == "application/javascript" or
v == "application/json" or
v == "application/x-javascript" or
v == "text/css" or
v == "text/javascript" then
ngx.ctx.needs_extra_processing = 1
ngx.header.content_length = nil
end
-- no extra_processing_csv checks
-- ---- END CLASSIC MODE CODE ----
-- ==================================================================
-- headers that can carry the clearnet origin; each present header is
-- rewritten dns -> onion (dns_to_onion handles both string values and
-- nginx's table form for repeated headers such as Set-Cookie)
local origin_rewrites = {
"Access-Control-Allow-Origin",
"Content-Security-Policy",
"Content-Security-Policy-Report-Only",
"Link",
"Location",
"Set-Cookie"
}
local i, k
for i, k in ipairs(origin_rewrites) do
local v = ngx.header[k]
if v then
ngx.header[k] = dns_to_onion(v)
end
end
}
# filter the response body en-route back to the user
body_filter_by_lua_block {
-- ==================================================================
-- ---- BEGIN CLASSIC MODE CODE ----
-- rather than blindly replacing "foo.com" with "foo.onion" everywhere,
-- instead we restrict such brute-force replacement to content that was
-- flagged in header_filter_by_lua_block
-- NOTE(review): body filters run once per chunk; a dns name split
-- across a chunk boundary would not be rewritten -- presumably
-- acceptable here, but confirm for large responses
if ngx.ctx.needs_extra_processing == 1 then
-- the flag was set; this content deserves brute-force search & replace
local chunk = ngx.arg[1]
-- subs_filter picked up the "//"-anchored strings; now we sub the rest
chunk = dns_to_onion(chunk)
-- and we sub the basic "foo\.com" regular-expressions, too
chunk = dnsre_to_onionre(chunk)
-- more complex regular expressions are out of scope.
ngx.arg[1] = chunk
end
-- ---- END CLASSIC MODE CODE ----
-- ==================================================================
-- no debug traps
}
# csp not suppressed, will be rewritten instead, see below
# hsts suppression
proxy_hide_header "Strict-Transport-Security";
# hpkp suppression
proxy_hide_header "Public-Key-Pins";
proxy_hide_header "Public-Key-Pins-Report-Only";
# global proxy settings
proxy_read_timeout 15;
proxy_connect_timeout 15;
# SSL config
ssl_certificate /Users/alecm/src/eotk/projects.d/bontchev.d/ssl.d/uw5xpd6ygotmq2tz.onion.cert;
ssl_certificate_key /Users/alecm/src/eotk/projects.d/bontchev.d/ssl.d/uw5xpd6ygotmq2tz.onion.pem;
ssl_buffer_size 4k;
#ssl_ciphers 'EECDH+CHACHA20:EECDH+AESGCM:EECDH+AES256'; ## LibreSSL, OpenSSL 1.1.0+
ssl_ciphers 'EECDH+AESGCM:EECDH+AES256'; ## OpenSSL 1.0.1% to 1.0.2%
ssl_ecdh_curve prime256v1;
#ssl_ecdh_curve secp384r1:prime256v1; ## NGINX nginx 1.11.0 and later
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# websockets: on the basis of http_upgrade, set connection_upgrade:
# empty -> empty
# default -> "upgrade"
map $http_upgrade $connection_upgrade {
default "upgrade";
"" "";
}
# FORCE_HTTPS is in use; set up separate server for port 80 & force redirects
server {
listen unix:/Users/alecm/src/eotk/projects.d/bontchev.d/uw5xpd6ygotmq2tz.onion.d/port-80.sock;
# subdomain regexp captures trailing dot, use carefully; does not need "~*"
# NB: this regexp should be kept in-sync with the other FORCE_HTTPS copy
server_name
uw5xpd6ygotmq2tz.onion
~^(?<servernamesubdomain>([-0-9a-z]+\\.)+)uw5xpd6ygotmq2tz\\.onion$
;
# suppress tor2web traffic; "let them use clearnet"
if ( $http_x_tor2web ) {
return 403 "This action is not supported over Onion yet, sorry.";
}
# tell the client to try again as HTTPS without ever leaving the onion
# use 307 / temporary redirect because your URIs may change in future
# use $host (not $server) to copy-over subdomains, etc, transparently
# SEND BACK ORIGINAL PARAMS, FIX THEM ONLY UPON FORWARD TO THE PROXY.
return 307 https://$host$request_uri;
}
# for uw5xpd6ygotmq2tz.onion -> bontchev.com
server {
# hardmap
# unix sockets; use <ONION_ADDRESS>.d as a naming convention
listen unix:/Users/alecm/src/eotk/projects.d/bontchev.d/uw5xpd6ygotmq2tz.onion.d/port-443.sock ssl;
# subdomain regexp captures trailing dot, use carefully; does not need "~*"
# NB: this regexp should be kept in-sync with the other FORCE_HTTPS copy
server_name
uw5xpd6ygotmq2tz.onion
~^(?<servernamesubdomain>([-0-9a-z]+\\.)+)uw5xpd6ygotmq2tz\\.onion$
;
# ---- BEGIN GENERATED CODE ---- -*- awk -*-
# blacklists (generated)
# no user_agent_blacklist
# no user_agent_blacklist_re
# no referer_blacklist
# no referer_blacklist_re
# no origin_blacklist
# no origin_blacklist_re
# no host_blacklist
# no host_blacklist_re
# no path_blacklist
# no path_blacklist_re
# no param_blacklist
# no param_blacklist_re
# polite blocks (generated)
# polite block for suppress_tor2web (generated)
if ( $http_x_tor2web ) { return 403 "This action is not supported over Onion yet, sorry."; }
# no block_user_agent
# no block_user_agent_re
# no block_referer
# no block_referer_re
# no block_origin
# no block_origin_re
# no block_host
# no block_host_re
# no block_path
# no block_path_re
# no block_location
# no block_location_re
# no block_param
# no block_param_re
# redirects (generated)
# no redirect_host
# no redirect_path
# no redirect_host_csv
# no redirect_path_csv
# no redirect_location_csv
# whitelists (generated)
# no user_agent_whitelist
# no user_agent_whitelist_re
# no referer_whitelist
# no referer_whitelist_re
# no origin_whitelist
# no origin_whitelist_re
# no host_whitelist
# no host_whitelist_re
# no path_whitelist
# no path_whitelist_re
# no param_whitelist
# no param_whitelist_re
# ---- END GENERATED CODE ----
# no cookie_lock cookie setting
# for test & to help SSL certificate acceptance
location ~* ^/hello[-_]onion/?$ {
return 200 "Hello, Onion User!";
}
# no hardcoded_endpoints
# for traffic
location / {
# ---- BEGIN GENERATED CODE ---- -*- awk -*-
# whitelist checks (generated)
# no user_agent_whitelist
# no user_agent_whitelist_re
# no referer_whitelist
# no referer_whitelist_re
# no origin_whitelist
# no origin_whitelist_re
# no host_whitelist
# no host_whitelist_re
# no path_whitelist
# no path_whitelist_re
# no param_whitelist
# no param_whitelist_re
# ---- END GENERATED CODE ----
# no cookie-lock checks
# deonionify the request_uri for forwarding (both path and args)
# compute $request_uri2: the client's request-uri with any embedded
# v2 onion addresses mapped back to their dns names, ready to be
# forwarded upstream via $new_url below
set_by_lua_block $request_uri2 {
local old = ngx.var.request_uri
-- onion_to_dns is potentially expensive at scale, so do a cheap test
-- (a single anchored match) before attempting any rewriting at all
local m, err = ngx.re.match(old, "\\b[a-z2-7]{16}\\.onion\\b", "o")
if not m then -- nothing to attempt to rewrite, quick return
return old
end
return onion_to_dns(old)
}
# note use of both $scheme and the deonionified uri (both path and args)
set $new_url "$scheme://${servernamesubdomain}bontchev.com$request_uri2";
proxy_pass $new_url;
proxy_http_version 1.1;
# a note on proxy_set_header, add_header, similar methods, etc;
# if you override *any* header then you will lose the other
# headers inherited from the parent contexts:
# https://blog.g3rt.nl/nginx-add_header-pitfall.html
proxy_set_header X-From-Onion 1;
proxy_set_header Host "${servernamesubdomain}bontchev.com";
proxy_set_header Accept-Encoding "identity";
proxy_set_header Connection $connection_upgrade; # SSL
proxy_set_header Upgrade $http_upgrade; # SSL
proxy_ssl_server_name on; # SSL
# rewrite request referer
set_by_lua_block $referer2 { return onion_to_dns(ngx.var.http_referer) }
proxy_set_header Referer $referer2;
# rewrite request origin
set_by_lua_block $origin2 { return onion_to_dns(ngx.var.http_origin) }
proxy_set_header Origin $origin2;
# rewrite request cookies
set_by_lua_block $cookie2 { return onion_to_dns(ngx.var.http_cookie) }
proxy_set_header Cookie $cookie2;
# non-GET methods (e.g.: POST) are not suppressed
}
}
# header purge
more_clear_headers "Age";
more_clear_headers "Server";
more_clear_headers "Via";
more_clear_headers "X-From-Nginx";
more_clear_headers "X-NA";
more_clear_headers "X-Powered-By";
more_clear_headers "X-Request-Id";
more_clear_headers "X-Runtime";
more_clear_headers "X-Varnish";
}
# -*- conf -*-
# eotk (c) 2017 Alec Muffett
DataDirectory /Users/alecm/src/eotk/projects.d/bontchev.d
ControlPort unix:/Users/alecm/src/eotk/projects.d/bontchev.d/tor-control.sock
PidFile /Users/alecm/src/eotk/projects.d/bontchev.d/tor.pid
Log notice file /Users/alecm/src/eotk/projects.d/bontchev.d/log.d/tor.log
SafeLogging 1
HeartbeatPeriod 60 minutes
LongLivedPorts 80,443
RunAsDaemon 1
# use single onions, many settings to tweak:
SocksPort 0
HiddenServiceSingleHopMode 1
HiddenServiceNonAnonymousMode 1
# hardmap for: bontchev.com -> uw5xpd6ygotmq2tz.onion
HiddenServiceDir /Users/alecm/src/eotk/projects.d/bontchev.d/uw5xpd6ygotmq2tz.onion.d
HiddenServicePort 80 unix:/Users/alecm/src/eotk/projects.d/bontchev.d/uw5xpd6ygotmq2tz.onion.d/port-80.sock
HiddenServicePort 443 unix:/Users/alecm/src/eotk/projects.d/bontchev.d/uw5xpd6ygotmq2tz.onion.d/port-443.sock
HiddenServiceNumIntroductionPoints 3
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment