Skip to content

Instantly share code, notes, and snippets.

@bennadel
Last active March 23, 2024 16:24
Show Gist options
Save bennadel/1852cfac78217ac09b77b5533906c794 to your computer and use it in GitHub Desktop.
From Noob To Docker On DigitalOcean With Nginx, Node.js, DataDog Logs, DogStatsD, And LetsEncrypt SSL Certificates
# Exclude hidden files (e.g. .git, .env) from the Docker build context.
.*
# The Dockerfile itself is not needed inside the image.
Dockerfile
# node_modules are installed inside the container via "npm install",
# never copied in from the host.
node_modules
#
# Production nginx reverse-proxy configuration.
#

# Round-robin load balancing across the two Node.js application containers.
# "web_1" and "web_2" resolve through Docker's embedded DNS on the shared
# bridge network (see the production docker-compose definitions).
upstream web_containers {
	server web_1:8080;
	server web_2:8080;
}

server {
	listen 80;
	server_name localhost;

	#
	# This is the folder that is being used for the LetsEncrypt webroot
	# plug-in-based authentication challenge. The LetsEncrypt certbot will
	# store files in this directory, which will then be requested over :80
	# by the LetsEncrypt authority.
	#
	location /.well-known/acme-challenge {
		root /tmp/letsencrypt/www;
	}

	location / {
		proxy_pass http://web_containers;
		proxy_set_header Host $host;
		proxy_set_header Referer $http_referer;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Host $server_name;
		proxy_set_header X-Forwarded-Proto $scheme;
	}
}

server {
	# FIX: "listen 443 ssl;" replaces the separate "ssl on;" directive, which
	# was deprecated in nginx 1.15.0 and removed in 1.25.1. Behavior on the
	# pinned 1.13.9 image is identical, and the config survives upgrades.
	listen 443 ssl;
	server_name localhost;

	#
	# These two files will be generated by the LetsEncrypt certbot and be
	# placed on the HOST machine. We can then pull them in through the host
	# volume mounting.
	#
	ssl_certificate /etc/letsencrypt/live/dailyprime.me/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/dailyprime.me/privkey.pem;
	ssl_session_timeout 5m;
	# NOTE(review): TLSv1 and TLSv1.1 are obsolete; consider narrowing this
	# to "TLSv1.2" once client support requirements are confirmed.
	ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
	ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
	ssl_prefer_server_ciphers on;
	ssl_session_cache shared:SSL:10m;
	# Custom Diffie-Hellman parameters, baked into the proxy image by its
	# Dockerfile.
	ssl_dhparam /etc/ssl/private/dhparams.pem;

	location / {
		proxy_pass http://web_containers;
		proxy_set_header Host $host;
		proxy_set_header Referer $http_referer;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Host $server_name;
		proxy_set_header X-Forwarded-Proto $scheme;
	}
}
# Local-development proxy configuration (plain HTTP, no TLS). Locally, a
# single "web" container is network-aliased as both "web_1" and "web_2" (see
# the development docker-compose file), so both upstream entries point at the
# same container.
upstream web_containers {
server web_1:8080;
server web_2:8080;
}
# Pass all traffic through to the Node.js upstream, forwarding the standard
# client-identity headers so the application sees the real request origin.
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://web_containers;
proxy_set_header Host $host;
proxy_set_header Referer $http_referer;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# HTTP-only proxy configuration with the ACME challenge location but no :443
# server. NOTE(review): this appears to be the certificate-bootstrap step —
# presumably used before the first certbot run, since nginx cannot load the
# :443 server until the certificate files exist; confirm against the article.
upstream web_containers {
server web_1:8080;
server web_2:8080;
}
server {
listen 80;
server_name localhost;
#
# This is the folder that is being used for the LetsEncrypt webroot
# plug-in-based authentication challenge. The LetsEncrypt certbot will
# store files in this directory, which will then be requested over :80
# by the LetsEncrypt authority.
#
location /.well-known/acme-challenge {
root /tmp/letsencrypt/www;
}
location / {
proxy_pass http://web_containers;
proxy_set_header Host $host;
proxy_set_header Referer $http_referer;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# Production docker-compose stack: two identical Node.js web containers, an
# nginx reverse proxy, and the DataDog agent, all on one bridge network.
# FIX: the indentation of this file was flattened (invalid YAML); structure
# restored per the Compose v3 schema.
version: "3"

services:
  web_1:
    image: dailyprime/web:latest
    container_name: web_1
    restart: unless-stopped
    networks:
      - dailyprime-net
    environment:
      DOGSTATSD_HOST: dd_agent
      # Numeric-looking env values are quoted so YAML delivers them as
      # strings rather than integers.
      DOGSTATSD_PORT: "8125"
      NODE_ENV: production
      PORT: "8080"
    links:
      - dd_agent

  web_2:
    image: dailyprime/web:latest
    container_name: web_2
    restart: unless-stopped
    networks:
      - dailyprime-net
    environment:
      DOGSTATSD_HOST: dd_agent
      DOGSTATSD_PORT: "8125"
      NODE_ENV: production
      PORT: "8080"
    links:
      - dd_agent

  proxy:
    image: dailyprime/proxy-prod:latest
    container_name: proxy
    restart: unless-stopped
    networks:
      - dailyprime-net
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # LetsEncrypt certificates generated on the HOST by certbot.
      - /etc/letsencrypt:/etc/letsencrypt
      # Webroot that nginx serves for the ACME authentication challenge.
      - /tmp/letsencrypt/www:/tmp/letsencrypt/www
    links:
      - web_1
      - web_2

  dd_agent:
    image: datadog/agent:latest
    container_name: dd_agent
    networks:
      - dailyprime-net
    environment:
      DD_API_KEY: ____REPLACE_WITH_YOUR_KEY____
      # Accept DogStatsD UDP traffic from the other containers, not just
      # localhost.
      DD_DOGSTATSD_NON_LOCAL_TRAFFIC: "true"
      DD_LOGS_ENABLED: "true"
    volumes:
      # Log-collection configs (see the conf.d yaml files in this gist).
      - /opt/datadog-agent/conf.d:/conf.d:ro
      - /opt/datadog-agent/run:/opt/datadog-agent/run:rw
      # Host metrics and the Docker socket, per the agent's standard setup.
      - /proc/:/host/proc/:ro
      - /sys/fs/cgroup/:/host/sys/fs/cgroup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

networks:
  dailyprime-net:
    driver: bridge
# Local-development docker-compose stack: one web container (aliased twice),
# an nginx proxy, and a mock DogStatsD agent.
# FIX: the indentation of this file was flattened (invalid YAML); structure
# restored per the Compose v3 schema.
version: "3"

services:
  web:
    build: ./web/.
    container_name: web
    command: npm run dev
    restart: unless-stopped
    networks:
      dailyprime-net:
        # When running locally, the nginx proxy is expecting two upstream
        # servers to be available. However, locally, I don't need
        # high-availability. As such, I'm going to create one instance of
        # "web" and just give it the two aliases that are being used in the
        # nginx proxy-pass definition.
        aliases:
          - web_1
          - web_2
    environment:
      DOGSTATSD_HOST: dd_agent
      # Numeric-looking env values are quoted so YAML delivers them as
      # strings rather than integers.
      DOGSTATSD_PORT: "8125"
      NODE_ENV: development
      PORT: "8080"
    volumes:
      # Mount the source for live editing; the anonymous volume below keeps
      # the container-installed node_modules from being hidden by the mount.
      - ./web:/var/www/dailyprime.me
      - /var/www/dailyprime.me/node_modules
    links:
      - dd_agent

  proxy:
    build: ./proxy/.
    container_name: proxy
    restart: unless-stopped
    networks:
      - dailyprime-net
    ports:
      # Quoted to sidestep YAML's sexagesimal parsing of "a:b" values.
      - "80:80"
    links:
      - web

  dd_agent:
    build: ./statsd-mock/.
    container_name: dd_agent
    restart: unless-stopped
    networks:
      - dailyprime-net

networks:
  dailyprime-net:
    driver: bridge
# Local-development reverse-proxy image: stock nginx plus the dev site
# configuration (plain HTTP only, no TLS assets).
FROM nginx:1.13.9-alpine
#
# This defines the site configuration for the local-development nginx.
#
COPY ./default.conf /etc/nginx/conf.d/
# Production reverse-proxy image: nginx plus the TLS assets it needs.
FROM nginx:1.13.9-alpine
#
# This label becomes available as meta-data on the running container. We can
# then use this label in the DataDogHQ log aggregation configuration to define
# which Docker services the DataDog agent should watch for logs.
#
LABEL me.dailyprime.service="proxy"
#
# The dhparams.pem file defines how OpenSSL performs the Diffie-Hellman (DH)
# key-exchange. I generated this file locally using:
# ---
# openssl dhparam -out dhparams.pem 4096
# ---
# (The output filename must match the "ssl_dhparam" path referenced by
# default.conf: /etc/ssl/private/dhparams.pem.)
#
COPY ./dhparams.pem /etc/ssl/private/
#
# This defines the site configuration for the production nginx.
#
COPY ./default.conf /etc/nginx/conf.d/
# Mock DogStatsD agent for local development: a tiny Node.js UDP server
# (index.js) that logs incoming metrics instead of shipping them to DataDog.
FROM node:9.7.0-alpine
WORKDIR /var/www/statsd
COPY ./index.js ./
CMD [ "node", "./index.js" ]
# Node.js application image for the "web" service.
FROM node:9.7.0-alpine
#
# This label becomes available as meta-data on the running container. We can
# then use this label in the DataDogHQ log aggregation configuration to define
# which Docker services the DataDog agent should watch for logs.
#
LABEL me.dailyprime.service="web"
WORKDIR /var/www/dailyprime.me
#
# Copy over the node installation files and install the modules.
# --
# NOTE: Since we are not mounting the node_modules folder, you have to run
# "npm install" from within the container.
# --
# Copying only package.json / package-lock.json before the full source lets
# Docker cache the slow "npm install" layer across source-only changes.
#
COPY ./package.json ./package-lock.json ./
RUN npm install
COPY ./ ./
#
# It is apparently best practice to not run the docker container as root.
# However, if I run it as "node", then I can't bash-in and augment the
# node_modules (permission denied), and "sudo" doesn't exist in the image.
# So, while actively developing the container, this directive gets commented
# out; it is left IN PLACE here because this is the deployment-ready build.
# --
# NOTE: The node-alpine image comes pre-installed with the "node" user.
#
USER node
CMD [ "node", "./wwwroot/index.js" ]
// Tiny stand-in for the DogStatsD agent during local development: listens
// for UDP datagrams on the standard StatsD port and echoes each to stdout.
const dgram = require('dgram');

const socket = dgram.createSocket('udp4');

// Fatal socket errors: log the stack trace and shut the listener down.
socket.on('error', function (err) {
	console.log(`Server error:\n${err.stack}`);
	socket.close();
});

// Each incoming datagram is (presumably) a StatsD metric line; just log it
// along with the sender's address.
socket.on('message', function (msg, rinfo) {
	console.log(`Server got: ${msg} from ${rinfo.address}:${rinfo.port}`);
});

// Announce readiness once the socket has bound successfully.
socket.on('listening', function () {
	const address = socket.address();
	console.log(`Server listening ${address.address}:${address.port}`);
});

// 8125 is the conventional StatsD / DogStatsD port.
socket.bind(8125);
// Require core and third-party modules.
const express = require( "express" );
const DogStatsD = require( "hot-shots" );
const os = require( "os" );
const path = require( "path" );

// ----------------------------------------------------------------------------------- //
// ----------------------------------------------------------------------------------- //

// The DogStatsD client ships custom metrics to the agent container (host and
// port come from the docker-compose environment). Transport failures are
// logged as structured JSON rather than thrown, so metric problems can never
// take the web application down.
const statsd = new DogStatsD({
	host: process.env.DOGSTATSD_HOST,
	port: process.env.DOGSTATSD_PORT,
	errorHandler: ( error ) => {
		console.log(
			JSON.stringify({
				log_level: "error",
				error: error
			})
		);
	}
});

const app = express();

// Serve everything under "/assets" straight from the local assets folder via
// the in-built static-file module.
app.use( "/assets", express.static( path.join( __dirname, "assets" ) ) );

// Hello-world route: bump the page-request metric, emit a structured trace
// log line, and return the static index page. The X-Host-Name header exposes
// which container answered, making the round-robin balancing observable.
app.all(
	"/",
	( request, response, next ) => {
		statsd.increment( "page_request" );
		console.log(
			JSON.stringify({
				log_level: "trace",
				message: `Handling request on web host ${ os.hostname() }.`
			})
		);
		response.sendFile(
			( __dirname + "/index.htm" ),
			{
				headers: {
					"X-Host-Name": os.hostname()
				}
			}
		);
	}
);

const port = ( process.env.PORT || 8080 );

// Bind the Express application to the exposed port and start accepting
// traffic.
app.listen(
	port,
	() => {
		console.log( "Express server has bootstrapped on port:", port );
	}
);
# DataDog agent log-collection config for the nginx proxy container.
# FIX: indentation was flattened (invalid YAML); list nesting restored.
init_config:

instances:

# Tail stdout/stderr of any Docker container carrying the matching
# "me.dailyprime.service" label (set in the proxy Dockerfile).
logs:
  - type: docker
    label: me.dailyprime.service:proxy
    service: dailyprime
    source: nginx
#!/bin/bash

#
# Renew any LetsEncrypt certificates that are approaching expiration.
# --
# FIX: use "-i" rather than "-it" — this script is intended to run from
# crontab, which has no controlling terminal, and "docker run -t" fails
# there with "the input device is not a TTY".
#
/usr/bin/docker run -i --rm --name letsencrypt \
	-v "/etc/letsencrypt:/etc/letsencrypt" \
	quay.io/letsencrypt/letsencrypt:latest \
	renew &&
#
# When a certificate is renewed, the nginx proxy container needs to be restarted in order
# to pick up the new certificates. However, according to the documentation:
#
# > certbot renew exit status will only be 1 if a renewal attempt failed. This means
# > certbot renew exit status will be 0 if no cert needs to be updated. If you write a
# > custom script and expect to run a command only after a cert was actually renewed you
# > will need to use the --post-hook since the exit status will be 0 both on successful
# > renewal and when renewal is not necessary.
#
# However, since the certbot is running inside a container, it doesn't have access to
# the nginx process, at least as far as I know - I'm a Docker noob. As such, I'm going to
# have this script restart nginx no matter what the outcome. And, since this will only
# happen occasionally, and nginx restarts really fast, this will only lead to a tiny bit
# of down time.
#
# NOTE: We are using absolute paths to the docker-compose command and to the config file
# because we can't depend on the PATH or the context of the crontab execution. As such,
# this makes it very clear exactly what will be executed.
#
/usr/local/bin/docker-compose -f /root/docker-compose.yml restart proxy &&
#
# Send a DogStatsD metric to DataDog, recording the successful completion of
# the certificate renewal.
# --
# FIX: "exec -T" disables the pseudo-TTY allocation that docker-compose
# attempts by default, which fails under cron for the same no-TTY reason.
#
/usr/local/bin/docker-compose -f /root/docker-compose.yml exec -T dd_agent bash -c "echo 'script.renew_certificate:1|c' > /dev/udp/127.0.0.1/8125"
#!/bin/bash

#
# /etc/letsencrypt
# WHAT: This is the default configuration directory. This is where certbot will store all
# generated keys and issued certificates.
#
# /var/lib/letsencrypt
# WHAT: This is the default working directory.
#
# /tmp/letsencrypt/www
# WHAT: This is the webroot into which the authentication challenge files will be placed.
# We're going to map this folder to the root of the "/.well-known/acme-challenge" location
# in our nginx configuration.
#
# certonly
# WHAT: This certbot subcommand tells certbot to obtain the certificate but not
# install it. We don't need to install it because we will be linking directly to the
# generated certificate files from within our subsequent nginx configuration.
#
# -d
# WHAT: Defines one of the domains to be used in the certificate. We can have up to 100
# domains in a single certificate.
#
# --authenticator webroot
# WHAT: We are going to use the webroot plug-in for the LetsEncrypt authentication
# challenge. This means that we're going to satisfy the ownership requirement by placing
# files in a public folder on our web-server.
#
# --webroot-path /var/www
# WHAT: This is the folder that we're going to use as the web-root in our "webroot"
# plug-in authentication challenge. Notice that we are mapping this folder to the
# "/tmp/letsencrypt/www" folder on the host machine (which will subsequently be included
# in our nginx configuration).
#
# --renew-by-default
# WHAT: Re-issue the certificate even if it already exists and is not expiring.
#
# --server https://acme-v02.api.letsencrypt.org/directory
# WHAT: The ACME directory endpoint of the certificate authority that will issue the
# certificate.
# --
# FIX: the original v1 endpoint (acme-v01) has been decommissioned by LetsEncrypt;
# the v2 endpoint is required for new issuance.
# NOTE(review): the quay.io/letsencrypt image is long-deprecated and may not speak
# ACMEv2 — confirm, and consider switching to the official certbot/certbot image.
#
docker run -it --rm --name letsencrypt \
	-v "/etc/letsencrypt:/etc/letsencrypt" \
	-v "/var/lib/letsencrypt:/var/lib/letsencrypt" \
	-v "/tmp/letsencrypt/www:/var/www" \
	quay.io/letsencrypt/letsencrypt:latest \
	certonly \
	-d dailyprime.me \
	-d www.dailyprime.me \
	--authenticator webroot \
	--webroot-path /var/www \
	--renew-by-default \
	--server https://acme-v02.api.letsencrypt.org/directory
# DataDog agent log-collection config for the Node.js web containers.
# FIX: indentation was flattened (invalid YAML); list nesting restored.
init_config:

instances:

# Tail stdout/stderr of any Docker container carrying the matching
# "me.dailyprime.service" label (set in the web Dockerfile).
logs:
  - type: docker
    label: me.dailyprime.service:web
    service: dailyprime
    source: nodejs
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment