Skip to content

Instantly share code, notes, and snippets.

@mmizutani mmizutani/nginx.conf forked from nateware/nginx.conf
Created Dec 10, 2018

What would you like to do?
Nginx sample config for EC2
# Sample nginx.conf optimized for EC2 c1.medium to xlarge instances.
# Also look at the haproxy.conf file for how the backend is balanced.
user "nginx" "nginx";
worker_processes 10;

error_log /var/log/nginx_error.log info;
# BUG FIX: pid must name a file; the original pointed at the bare
# /var/run/ directory, which nginx refuses to start with.
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    # Mime types path needs to be absolute as of nginx 0.7.x from 0.6.x
    include /usr/local/nginx/conf/mime.types;

    # Tune the appropriate default for your system accordingly. Only used if mime types fail.
    #default_type text/html;
    default_type application/octet-stream;

    # These are good default values.
    tcp_nopush on;
    tcp_nodelay off;
    sendfile on;
    keepalive_timeout 30;

    log_format main '$remote_addr - $remote_user [$time_local] $status '
                    '"$request" $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" ($request_time)';

    # Output compression with gzip
    gzip on;
    gzip_http_version 1.1;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_proxied any;
    gzip_types text/plain image/png image/gif image/jpeg text/html text/css application/json application/x-javascript application/xml application/xml+rss text/javascript;
    gzip_buffers 16 8k;

    # Disable gzip for certain browsers.
    # BUG FIX: the original used typographic quotes (“ ”), which nginx
    # cannot parse; they must be plain ASCII double quotes.
    gzip_disable "MSIE [1-6].(?!.*SV1)";

    # Virtualhost server definition for backend cluster
    # This is a combination of two different references:
    # - Ezra's complete config

    # List upstream app servers that render dynamic content. These are
    # typically on the same server as nginx. These will either be multiple
    # ports (processes), or a single port if the app server has its own
    # master/slave process model.
    upstream app_servers {
        # NOTE(review): the server entries were lost in the original paste;
        # replace these with the ports your app server processes listen on.
        server 127.0.0.1:8000;
        server 127.0.0.1:8001;
    }

    # HTTP configuration
    server {
        listen 80 default sndbuf=16k rcvbuf=8k backlog=1024;

        # Apache DocumentRoot equivalent
        root /var/www/html;
        access_log /var/log/nginx_access.log main;

        client_body_temp_path /tmp/nginx_client_data 1 2;
        fastcgi_temp_path /tmp/nginx_fastcgi_data;
        proxy_temp_path /tmp/nginx_proxy_data;

        # Taken from nginx wiki. Qualified thru load testing
        proxy_connect_timeout 90;
        proxy_send_timeout 90;
        proxy_read_timeout 90;
        proxy_buffer_size 4k;
        proxy_buffers 4 32k;
        proxy_busy_buffers_size 64k;
        proxy_temp_file_write_size 64k;

        location / {
            # needed to forward user's IP address to backend
            proxy_set_header X-Real-IP $remote_addr;
            # needed for HTTPS
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_redirect off;
            proxy_max_temp_file_size 0;

            # Use this variable to key off whether we pass requests to backend,
            # or serve them directly via nginx. By default everything gets
            # passed thru, and we only serve specific resources directly.
            set $send_to_app "yes";

            # Handle all images and assets explicitly. Faster than fs check every time.
            if ($request_uri ~ "images/|img/|javascripts/|js/|stylesheets/|css/") {
                set $send_to_app "no";
            }

            # If the file exists as a static file serve it directly, without
            # running all the other rewrite tests on it.
            if (-f $request_filename) {
                set $send_to_app "no";
            }

            # Check for index.html for directory index
            # If it's there on the filesystem, then rewrite the url to add
            # /index.html to the end of it and serve it directly.
            if (-f $request_filename/index.html) {
                set $send_to_app "no";
                rewrite (.*) $1/index.html break;
            }

            # This is the meat of web app page caching.
            # It adds .html to the end of the url and then checks the filesystem for
            # that file. If it exists, then we rewrite the url to have explicit .html
            # on the end and then send it on its way to the next config rule.
            # If there is no file on the fs then it sets all the necessary headers and
            # proxies to our backend.
            if (-f $request_filename.html) {
                set $send_to_app "no";
                rewrite (.*) $1.html break;
            }

            # Check our state to make sure we're forwarding it back
            if ($send_to_app = "yes") {
                proxy_pass http://app_servers;
            }

            # File uploads
            client_max_body_size 10m;
        }

        # Large content
        # BUG FIX: prefix locations must start with "/"; a bare "download"
        # prefix can never match a request URI.
        location ^~ /download {
            client_body_buffer_size 1024k;
        }

        # redirect server error pages to the static error pages
        error_page 403 /403.html;
        error_page 404 /404.html;
        error_page 500 502 503 504 /500.html;
        location = /500.html {
            root /var/www/html;
        }
    }
}
# High-throughput nginx tuning sample.

# This number should be, at maximum, the number of CPU cores on your system.
# (since nginx doesn't benefit from more than one worker per CPU.)
worker_processes 24;

# Number of file descriptors used for Nginx. This is set in the OS with 'ulimit -n 200000'
# or using /etc/security/limits.conf
worker_rlimit_nofile 200000;

# only log critical errors
# BUG FIX: the directive was missing its terminating semicolon.
error_log /var/log/nginx/error.log crit;

# BUG FIX: worker_connections, use, and multi_accept are only valid inside
# an events {} block; the enclosing braces were lost in the original paste.
events {
    # Determines how many clients will be served by each worker process.
    # (Max clients = worker_connections * worker_processes)
    # "Max clients" is also limited by the number of socket connections available on the system (~64k)
    worker_connections 4000;

    # essential for linux, optimized to serve many clients with each thread
    use epoll;

    # Accept as many connections as possible, after nginx gets notification about a new connection.
    # May flood worker_connections, if that option is set too low.
    multi_accept on;
}

# BUG FIX: the directives below belong in the http {} context; the enclosing
# braces were lost in the original paste.
http {
    # Caches information about open FDs, frequently accessed files.
    # Changing this setting, in my environment, brought performance up from 560k req/sec, to 904k req/sec.
    # I recommend using some variant of these options, though not the specific values listed below.
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    # Buffer log writes to speed up IO, or disable them altogether
    #access_log /var/log/nginx/access.log main buffer=16k;
    access_log off;

    # Sendfile copies data between one FD and other from within the kernel.
    # More efficient than read() + write(), since that requires transferring data to and from the user space.
    sendfile on;

    # Tcp_nopush causes nginx to attempt to send its HTTP response head in one packet,
    # instead of using partial frames. This is useful for prepending headers before calling sendfile,
    # or for throughput optimization.
    tcp_nopush on;

    # don't buffer data-sends (disable Nagle algorithm). Good for sending frequent small bursts of data in real time.
    tcp_nodelay on;

    # Timeout for keep-alive connections. Server will close connections after this time.
    keepalive_timeout 30;

    # Number of requests a client can make over the keep-alive connection. This is set high for testing.
    keepalive_requests 100000;

    # allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
    reset_timedout_connection on;

    # send the client a "request timed out" if the body is not loaded by this time. Default 60.
    client_body_timeout 10;

    # If the client stops reading data, free up the stale client connection after this much time. Default 60.
    send_timeout 2;

    # Compression. Reduces the amount of data that needs to be transferred over the network
    gzip on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
    gzip_disable "MSIE [1-6]\.";
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.