Opencast Presentation nginx Proxy
@Rillke · created March 25, 2020
# Yes, you are an nginx. But do not tell everyone your version.
server_tokens off;
# Cache definition; used for proxying to Jetty.
# This defines a cache stored at /tmp/nginx/cache with only one
# hierarchy level and no separate temp path. The key zone (used for
# matching requests to cache entries) takes 10M of RAM, entries
# inactive for more than 2h are scheduled for deletion, and the
# cache may grow to at most 160M.
proxy_cache_path /tmp/nginx/cache levels=1 use_temp_path=off
                 keys_zone=STATIC:10m inactive=2h max_size=160m;
# connection limits
# When the key in limit_req_zone (and limit_conn_zone) is empty
# the limits are not applied. You can use this in conjunction with
# the map and geo modules to create a whitelist of IPs where the
# throttle limits are not applied.
geo $whitelist {
    default 0;
    # CHANGE ME: university one
    141.xxx.xxx.xxx/16 1;
    # CHANGE ME: university two
    193.xxx.xxx.xxx/22 1;
}
map $whitelist $bin_remote_addr_if_limits_apply {
    0 $binary_remote_addr;
    1 '';
}
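# Illustration: a request from an address inside one of the geo ranges above
# gets $whitelist = 1, so the mapped key is empty and limit_req/limit_conn
# skip accounting for it; all other clients are keyed by $binary_remote_addr.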
# Sets parameters for a shared memory zone that will keep states
# for various keys. In particular, the state includes the current
# number of connections. The key can contain text, variables, and
# their combination. Requests with an empty key value are not accounted.
limit_conn_zone $bin_remote_addr_if_limits_apply zone=conn_addr:10m;
# 20 requests per second per IP
limit_req_zone $bin_remote_addr_if_limits_apply zone=req_addr:10m rate=20r/s;
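# Sizing note (per the nginx docs): one megabyte of zone keeps roughly
# 16 thousand 64-byte states, so each 10m zone above can track on the
# order of 160,000 client addresses.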
# Sets the text that should be changed in the “Location” and “Refresh”
# header fields of a proxied server response.
# This is required, for example, in the media module when logging in.
# Otherwise the login succeeds, but the redirect points to plain HTTP;
# it is upgraded by CSP or HSTS, but without the Cookie and Referer
# headers, leading to a 419: No Reason Phrase.
proxy_redirect http://$host https://$host;
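# For illustration (the path is hypothetical): an upstream redirect of
#   Location: http://presentation.opencast.example.com/login
# is rewritten to
#   Location: https://presentation.opencast.example.com/login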
# Define client-side caching levels for different MIME types
map $sent_http_content_type $expires_aggressive {
    default off;
    text/html 5d;
    text/css 5d;
    application/javascript 5d;
    ~image/ 2d;
    application/font-woff max;
    application/font-woff2 max;
    application/vnd.ms-fontobject max;
    application/x-font-opentype max;
    application/x-font-truetype max;
    application/x-font-ttf max;
    font/woff max;
    font/woff2 max;
}
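# For reference (documented expires behaviour): 'expires 5d' emits
# 'Cache-Control: max-age=432000' (5 * 86400 s), while 'expires max'
# sets Expires to 31 Dec 2037 and Cache-Control to ten years.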
map $sent_http_content_type $expires_lite {
    default off;
    application/font-woff max;
    application/font-woff2 max;
    application/vnd.ms-fontobject max;
    application/x-font-opentype max;
    application/x-font-truetype max;
    application/x-font-ttf max;
    font/woff max;
    font/woff2 max;
}
# CORS preparations: allow CORS requests from some known hosts
# for plugin integration.
# CHANGE ME
map $http_origin $cors_origin {
    default '';
    https://studip.example.com $http_origin;
    https://ilias.example.com $http_origin;
}
map $http_origin $cors_credentials {
    default '';
    https://studip.example.com true;
    https://ilias.example.com true;
}
# Only generate a weak ETag if an upstream ETag exists
map $upstream_http_etag $weak_etag {
    default 'W/$upstream_http_etag';
    '' '';
}
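# For illustration (example value, not from the original config): an upstream
# 'ETag: "5e7a"' is re-sent as 'ETag: W/"5e7a"'; if upstream sends no ETag,
# $weak_etag is empty and add_header emits no header at all.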
# Most of this was generated by https://github.com/jwilder/nginx-proxy
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
    default $http_x_forwarded_proto;
    '' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
    default $http_x_forwarded_port;
    '' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
    default upgrade;
    '' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
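# The dhparam file can be generated once (standard openssl command; the
# path matches the directive above):
#   openssl dhparam -out /etc/nginx/dhparam/dhparam.pem 2048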
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
    default off;
    https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
                 '"$request" $status $body_bytes_sent '
                 '"$http_referer" "$http_user_agent"';
access_log off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
# CHANGE ME
#resolver 127.0.0.11;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
    server_name _; # This is just an invalid value which will never trigger on a real hostname.
    listen 80;
    access_log /var/log/nginx/access.log vhost;
    return 503;
}
# presentation.opencast.example.com
upstream oc_presentation {
    # Keep 9 connections per worker alive to the upstream. The headers
    # and protocol (HTTP/1.1) required are already set by the jwilder
    # config. That's 9*16=144 connections with 16 vCores; Jetty accepts
    # up to about 500. When the limit is exceeded, the least recently
    # used connections are closed.
    # Needs additional configuration in the vhost_location settings.
    keepalive 9;
    ## Can be connected with the "opencastdocker_default" network
    # CHANGE ME:
    # opencastdocker_presentation_1
    server 172.19.0.6:8080;
}
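# The container IP hard-coded above can be looked up with a standard docker
# command (container name taken from the comment above; adjust as needed):
#   docker inspect \
#     -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' \
#     opencastdocker_presentation_1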
server {
    # CHANGE ME:
    server_name presentation.opencast.example.com;
    listen 80 default_server;
    access_log /var/log/nginx/access.log vhost;
    location / {
        return 301 https://$host$request_uri;
    }
}
server {
    # CHANGE ME:
    server_name presentation.opencast.example.com;
    listen 443 ssl http2 default_server;
    access_log /var/log/nginx/access.log vhost;
    ssl_session_timeout 5m;
    ssl_session_cache shared:SSL:50m;
    ssl_session_tickets off;
    # CHANGE ME:
    ssl_certificate /etc/nginx/certs/presentation.opencast.example.com.crt;
    ssl_certificate_key /etc/nginx/certs/presentation.opencast.example.com.key;
    add_header Strict-Transport-Security "max-age=31536000" always;
    include /etc/nginx/vhost.d/presentation.opencast.example.com;
    location / {
        proxy_pass http://oc_presentation;
        include /etc/nginx/vhost.d/presentation.opencast.example.com_location;
    }
}
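# ----------------------------------------------------------------
# Main nginx configuration (apparently a separate file in this gist)
# ----------------------------------------------------------------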
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
# worker_rlimit_nofile:
# number of file descriptors used for nginx
# the limit for the maximum FDs on the server is usually set by the OS.
# if you don't set FDs, the OS settings are used, which default to 2000
worker_rlimit_nofile 100000;
# thread_pool: Defines named thread pools used for multi-threaded reading and
# sending of files without blocking worker processes.
# In the event that all threads in the pool are busy, a new task will wait in the queue.
thread_pool ocvideo threads=256 max_queue=65536;
events {
    # worker_connections:
    # maximum number of simultaneous connections that can be opened by a
    # worker process. The default is 512 (1024 in Alpine), which gives
    # only 16384 connections with 16 workers, and this includes
    # connections to upstream servers as well.
    worker_connections 2048;
    # worker_aio_requests:
    # When using aio with the epoll connection processing method, sets
    # the maximum number of outstanding asynchronous I/O operations for
    # a single worker process. The default is 32.
    worker_aio_requests 256;
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    keepalive_timeout 65;
    include /etc/nginx/conf.d/*.conf;
}
daemon off;
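# ----------------------------------------------------------------
# Per-vhost directives (apparently a separate file in this gist,
# included via /etc/nginx/vhost.d/ from the server block above)
# ----------------------------------------------------------------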
# Do not buffer requests
proxy_request_buffering off;
# Do not buffer responses
# already off by default in the jwilder config; duplicating it would cause an error
#proxy_buffering off;
# Enable on-the-fly gzip compression for larger plain text files and for
# proxied applications responses.
gzip on;
# default is 1; 2 is still moderate
gzip_comp_level 2;
# a response must exceed this many bytes to be compressed
gzip_min_length 1000;
# enable gzip, no matter what, for proxied responses
gzip_proxied any;
# only for some types we know will benefit from compression
gzip_types text/plain
           application/javascript
           application/json
           text/xml
           text/css
           application/xml;
# add response headers
include /etc/nginx/response-headers.conf;
# do not send requests for favicons to Jetty/Opencast
location /favicon.ico {
    alias /data/opencast/downloads/favicon.ico;
    sendfile on;
}
# do not send requests for robots.txt to Jetty/Opencast
location /robots.txt {
    alias /data/opencast/downloads/robots.txt;
    sendfile on;
}
# do not allow more than x requests per second
# With this configuration, the first 20 excessive requests (delay) are
# passed on without delay, the next 80 (burst - delay) are delayed so
# that the overall rate does not exceed the specified limit, and any
# request beyond the total burst size of 100 is rejected.
# n.b. Studentenwerk is exempted via the req_addr zone.
limit_req zone=req_addr burst=100 delay=20;
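# Worked example (illustrative numbers): a client firing 120 requests at
# once gets roughly 20 served immediately, 80 delayed down to 20 r/s, and
# the remainder rejected with the default 503 status.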
# serve static files
location ^~ /static/ {
    alias /data/opencast/downloads/;
    include /etc/nginx/videos.conf;
}
# shorthand for ILIAS (200 character URL limit)
location ^~ /v/ {
    alias /data/opencast/downloads/mh_default_org/engage-player/;
    include /etc/nginx/videos.conf;
}
# Enable caching for static content.
# This reduces the number of connections made to Jetty.
location /engage/ {
    # Host names are not beautiful, but they work, and since responses
    # are cached, that's just fine.
    # CHANGE ME:
    proxy_pass http://oc_presentation;
    # We must buffer responses while streaming them to the client in
    # order to cache them
    proxy_buffering on;
    # The cache's name (defined in the HTTP context)
    proxy_cache STATIC;
    # Allow sending stale responses while running a background refresh
    proxy_cache_background_update on;
    # Define validity of cached entries based on the response code
    proxy_cache_valid 200 302 10m;
    proxy_cache_valid 404 301 5m;
    proxy_cache_valid any 1m;
    # Allow the cache to respond even if upstream currently throws errors
    proxy_cache_use_stale error timeout invalid_header updating
                          http_500 http_502 http_503 http_504;
    # Add a weak ETag header, as the strong one from upstream is removed
    # by nginx when proxy + gzip is active
    add_header Etag $weak_etag;
    include /etc/nginx/response-headers.conf;
    # Do not log!
    access_log off;
}
# Unfortunately, we cannot simply cache everything under /engage/,
# because the plugin paths appear to be randomized each time a new
# Jetty instance is started.
# The following locations, however, should be safe to cache:
location /engage/theodul/ui/js/lib/ {
    include /etc/nginx/static-proxy-pass.conf;
}
location /engage/theodul/ui/css/ {
    include /etc/nginx/static-proxy-pass.conf;
}
location /engage/ui/js/lib/ {
    include /etc/nginx/static-proxy-pass.conf;
}
location /engage/ui/css/ {
    include /etc/nginx/static-proxy-pass.conf;
}
location /ltitools/ {
    include /etc/nginx/static-proxy-pass.conf;
}
# This location may do something with Cookies. Do not cache.
location /ltitools/series/ {
    # Host names are not beautiful, but they work.
    # CHANGE ME:
    proxy_pass http://oc_presentation;
    # Add a weak ETag header, as the strong one from upstream is removed
    # by nginx when proxy + gzip is active
    add_header Etag $weak_etag;
    include /etc/nginx/response-headers.conf;
    # Do not log!
    access_log off;
}
# This seems to be safe to cache.
location /ltitools/series/series {
    include /etc/nginx/static-proxy-pass.conf;
}
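# ----------------------------------------------------------------
# Shared response headers (apparently the separate file included
# above as /etc/nginx/response-headers.conf)
# ----------------------------------------------------------------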
# HSTS
add_header Strict-Transport-Security "max-age=31536000" always;
# CAUTION: There could be several add_header directives.
# These directives are inherited from the previous level
# if and only if there are no add_header directives defined
# on the current level.
# -------------------------
# Allow some CORS access
# https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
add_header Access-Control-Allow-Origin '$cors_origin';
add_header Access-Control-Allow-Credentials '$cors_credentials';
# Specify CSP (as long as Opencast doesn't do it while it really should)
add_header Content-Security-Policy "upgrade-insecure-requests; default-src 'self' 'unsafe-eval' 'unsafe-inline' data: mediastream: blob:; img-src *; media-src *;";
# Send information to the client about caching of proxied requests
add_header X-Cache-Status $upstream_cache_status;
# Send rate- and connection-limiting status to the client
add_header X-Rate-Limit-Status $limit_req_status;
add_header X-Conn-Limit-Status $limit_conn_status;
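# ----------------------------------------------------------------
# Cached proxying for static assets (apparently the separate file
# included above as /etc/nginx/static-proxy-pass.conf)
# ----------------------------------------------------------------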
# Host names are not beautiful, but they work, and since responses are
# cached, that's just fine.
# CHANGE ME:
proxy_pass http://oc_presentation;
# We must buffer responses while streaming them to the client in order
# to cache them
proxy_buffering on;
# The cache's name (defined in the HTTP context)
proxy_cache STATIC;
# Allow sending stale responses while running a background refresh
proxy_cache_background_update on;
# Define validity of cached entries based on response code
proxy_cache_valid 200 302 1h;
proxy_cache_valid 404 301 5m;
proxy_cache_valid any 1m;
# Allow the cache to respond even if upstream currently throws errors
proxy_cache_use_stale error timeout invalid_header updating
                      http_500 http_502 http_503 http_504;
# Do not log!
access_log off;
# Cache!
expires $expires_aggressive;
# Add a weak ETag header, as the strong one from upstream is removed by
# nginx when proxy + gzip is active
add_header Etag $weak_etag;
include /etc/nginx/response-headers.conf;
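# ----------------------------------------------------------------
# Video delivery tuning (apparently the separate file included as
# /etc/nginx/videos.conf in the /static/ and /v/ locations above)
# ----------------------------------------------------------------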
# async input output: use the threads from the ocvideo pool for delivery
aio threads=ocvideo;
# for smaller files (thumbnails, for example), use sendfile,
# which transfers data from one file descriptor to another
# directly in kernel space
sendfile on;
# send at most 1m before working on the next request in the queue
# (this prevents a client on a very fast connection from being served exclusively)
sendfile_max_chunk 1m;
# files below 8m use sendfile; larger ones use direct I/O (-> aio)
directio 8m;
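# Example of the split (illustrative sizes): a 2m thumbnail is served via
# sendfile, while a 500m MP4 exceeds the 8m threshold and is read with
# direct I/O through the ocvideo thread pool.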
# https://thoughts.t37.net/nginx-optimization-understanding-sendfile-tcp-nodelay-and-tcp-nopush-c55cdd276765
# Combined with sendfile, tcp_nopush ensures that packets are full
# before being sent to the client
tcp_nopush on;
# bypass Nagle's algorithm and send the data as soon as it's available
tcp_nodelay on;
# do not log access to video files
access_log off;
#### rate limits / throttle ####
# allow the first 7 megabytes to be downloaded at full speed (timeline images, moov atom, etc.)
limit_rate_after 7m;
# 1 megabyte per second
limit_rate 1m;
# allow up to x connections from one IP (Studentenwerk is exempted via the conn_addr zone)
limit_conn conn_addr 100;
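# Worked example (illustrative size): for a 100m video download, the first
# 7m go out at full speed, the remaining 93m at 1m/s, i.e. roughly 93 s.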