Skip to content

Instantly share code, notes, and snippets.

@eudes
Created February 1, 2013 21:18
Show Gist options
  • Save eudes/4694214 to your computer and use it in GitHub Desktop.
Calomel.org /etc/nginx.conf Nginx reverse proxy to a few back end web servers
#######################################################
### Calomel.org /etc/nginx.conf BEGIN
#######################################################
# Number of worker processes; usually sized to the machine's CPU core count.
worker_processes 3;
#worker_rlimit_nofile 1024;
events {
# Maximum simultaneous connections per worker (3 workers x 64 = 192 total).
worker_connections 64;
#accept_mutex_delay 50ms;
}
http {
## Size Limits
#client_body_buffer_size 8k;
#client_header_buffer_size 1k;
#client_max_body_size 1m;
#large_client_header_buffers 4 4k/8k;
## Timeouts
# client_body_timeout 60;
# client_header_timeout 60;
# keepalive_timeout: first value is the server-side timeout, second is the
# Keep-Alive header value sent to the client.
keepalive_timeout 300 300;
# send_timeout 60;
## General Options
charset utf-8;
default_type application/octet-stream;
ignore_invalid_headers on;
include /etc/mime.types;
keepalive_requests 100;
#keepalive_disable msie6;
# Disable byte-range requests entirely.
max_ranges 0;
#open_file_cache max=1000 inactive=1h;
#open_file_cache_errors on;
#open_file_cache_min_uses 3;
#open_file_cache_valid 1m;
#output_buffers 1 512k;
postpone_output 1440;
recursive_error_pages on;
reset_timedout_connection on;
# Do not reveal the nginx version string in responses or error pages.
server_tokens off;
server_name_in_redirect off;
sendfile off; # off for FreeBSD zfs to avoid redundant data caching
sendfile_max_chunk 512K;
source_charset utf-8;
## Proxy settings. Make sure the "timeout"s are long enough to
## take account of over loaded back end servers or long running
## cgi scripts. If the proxy timeout is too short the nginx proxy
## might re-request the data over and over again, putting more
## load on the backend server.
proxy_max_temp_file_size 0;
proxy_connect_timeout 900;
proxy_send_timeout 900;
proxy_read_timeout 900;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_intercept_errors on;
# Shared cache "webcache": 10 MB of keys in memory, entries expire after
# one idle day, on-disk cache capped at ~2 GB.
proxy_cache_path /disk01/web_cache levels=1:2 keys_zone=webcache:10m inactive=1d max_size=2000m;
proxy_temp_path /disk01/web_cache/tmp;
# Only cache a response after it has been requested at least 5 times.
proxy_cache_min_uses 5;
## Limit requests per second to 250 requests per minute. If the
## user's ip address goes over the limit they will be sent an
## error 503 for every subsequent request.
limit_req_zone $binary_remote_addr zone=gulag:10m rate=250r/m;
## Compression (if you want real time compression you can enable it here. Make
## sure your system is quite fast, as compression could add a noticeable delay
## in sending data to the client.)
#gzip off;
#gzip_static on;
#gzip_buffers 16 8k;
#gzip_http_version 1.0;
#gzip_comp_level 6;
#gzip_min_length 100;
#gzip_types text/plain text/html text/css image/x-icon image/gif;
#gzip_vary on;
## Log Formats: "main" is a conventional access-log line; "cache" records
## the upstream cache status (HIT/MISS/BYPASS/...) and cache headers.
log_format main '$remote_addr $host $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"';
log_format cache '$time_local $upstream_cache_status Cache-Control: $upstream_http_cache_control Expires: $upstream_http_expires "$request" ($status) "$http_user_agent"';
## back end web servers with "hot fail over". You can add as many back end
## servers as you like here. If you add the "backup" directive the server will
## only be used if the other servers are down. In this example we have two main
## web servers (100 and 150) being load balanced and (1.200) as the hot spare
## backup.
upstream backend_web_servers {
# A server is marked down for 180s after 250 failed attempts in that window.
server 192.168.1.100:80 max_fails=250 fail_timeout=180s;
server 192.168.1.150:80 max_fails=250 fail_timeout=180s;
server 192.168.1.200:80 backup;
}
## http .:. clients without a host header get an error page. This will handle
## scanners looking for servers on an ip address and broken clients.
server {
    add_header Cache-Control "public, must-revalidate";
    access_log /var/log/nginx/access.log main buffer=32k;
    error_log /var/log/nginx/error.log error;
    expires 30d;
    listen 192.168.1.50:80;
    # Catch-all server name: matches requests with no/unknown Host header.
    server_name _;
    ## All other errors get the generic error page.
    ## FIX: the error_page URI must match the internal location below
    ## (it previously pointed at /error_page.html, which had no matching
    ## location, so the internal error page was unreachable).
    error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 495 496 497
               500 501 502 503 504 505 506 507 /error_page_generic.html;
    location /error_page_generic.html {
        internal;
    }
}
## This is our first hostname, example.com. Any client with a host header
## containing any string in this server{} block's server_name directive will be
## served from here.
## http .:. example.com
server {
    add_header Cache-Control "public, must-revalidate";
    access_log /var/log/nginx/access.log main buffer=32k;
    access_log /var/log/nginx/cache.log cache;
    error_log /var/log/nginx/error.log error;
    expires 1h;
    listen 192.168.1.50:80 sndbuf=128k;
    # Per-client rate limiting using the "gulag" zone defined in http{}.
    limit_req zone=gulag burst=1000 nodelay;
    server_name example.com www.example.com internal_lan_name.domain.lan;
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_cache webcache;
    proxy_cache_key $scheme$host$request_uri;
    proxy_cache_valid 200 301 302 304 120m;
    proxy_cache_valid any 1m;
    # Only allow GET, HEAD and POST request methods. Since this is a proxy you
    # may want to be more restrictive with your request methods. The calls are
    # going to be passed to the back end server and nginx does not know what it
    # normally accepts, so everything gets passed. If we only need to accept
    # GET, HEAD and POST then limit that here.
    if ($request_method !~ ^(GET|HEAD|POST)$ ) {
        return 403;
    }
    # user forum. This goes to a different server than the standard web
    # cluster, for example.
    location /forum/ {
        proxy_pass http://192.168.1.201/forum/;
    }
    ## Do not cache any requests using our cgi paths. The http_my_secret_header
    ## is not used and is just a place holder if you need this function.
    ## FIX: the original regex was (/cgi/||/authorized/|/restrict/) — the "||"
    ## creates an empty alternative that matches EVERY URI, which silently
    ## disabled caching for the whole site.
    location ~* (/cgi/|/authorized/|/restrict/) {
        expires epoch;
        proxy_no_cache 1;
        proxy_cache_bypass $http_my_secret_header;
        proxy_pass http://backend_web_servers;
    }
    # default htdocs
    location / {
        ## redirect host going to www to non-www hostname
        if ($host ~* ^(www\.example\.com)$ ) {
            rewrite ^/(.*)$ http://example.com/$1 permanent;
        }
        proxy_pass http://backend_web_servers;
    }
    ## All other errors get the generic error page.
    ## FIX: the error_page URI must match the internal location below
    ## (it previously pointed at /error_page.html, which had no matching
    ## location, so error requests were proxied to the backend instead).
    error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 495 496 497
               500 501 502 503 504 505 506 507 /error_page_example_com.html;
    location /error_page_example_com.html {
        internal;
    }
}
## This next section is here to show you that nginx can proxy a completely
## different domain name out of the same nginx daemon. On the back end servers,
## someotherdomain.com is served from a different Apache virtual host.
## http .:. someotherdomain.com
server {
    add_header Cache-Control "public, must-revalidate";
    access_log /var/log/nginx/access.log main buffer=32k;
    access_log /var/log/nginx/cache.log cache;
    error_log /var/log/nginx/error.log error;
    expires 1h;
    listen 192.168.1.50:80 sndbuf=128k;
    # Per-client rate limiting using the "gulag" zone defined in http{}.
    limit_req zone=gulag burst=100 nodelay;
    server_name someotherdomain.com www.someotherdomain.com some_internal_lan_name.domain.lan;
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_cache webcache;
    proxy_cache_key $scheme$host$request_uri;
    proxy_cache_valid 200 301 302 304 180m;
    proxy_cache_valid any 1m;
    ## Only allow GET, HEAD and POST request methods
    if ($request_method !~ ^(GET|HEAD|POST)$ ) {
        return 403;
    }
    # svn server
    location /svn/ {
        proxy_pass http://192.168.1.60:8000/svn/;
    }
    # git server
    location /git/ {
        proxy_pass http://192.168.1.70:8000/git/;
    }
    # forum redirect -- for example, perhaps we do not host that
    # data anymore and want to redirect users to the new location.
    # FIX: corrected hostname typo ("new_loaction" -> "new_location").
    # NOTE(review): the captured path is deliberately discarded — everything
    # under /forum/ redirects to the forum root. Use $1 to preserve paths.
    location /forum/ {
        rewrite ^/(.*)$ http://new_location.somedomain.com/forum/ permanent;
    }
    # default htdocs
    location / {
        proxy_pass http://backend_web_servers;
    }
    ## All other errors get the generic error page.
    ## FIX: the error_page URI must match the internal location below
    ## (it previously pointed at /error_page_3.html, which had no matching
    ## location, so error requests were proxied to the backend instead).
    error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 495 496 497
               500 501 502 503 504 505 506 507 /error_page_someotherdomain_com.html;
    location /error_page_someotherdomain_com.html {
        internal;
    }
}
}
#
#######################################################
### Calomel.org /etc/nginx.conf END
#######################################################
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment