@robkooper
Last active February 21, 2019 15:17
Running PEcAn in docker swarm
# This file will override the configuration options in the docker-compose
# file. Copy this file to the same folder as docker-compose.yml and name it .env
# ----------------------------------------------------------------------
# GENERAL CONFIGURATION
# ----------------------------------------------------------------------
# Folder to store all data
DATA_DIR=/home/kooper/pecan
# ----------------------------------------------------------------------
# TRAEFIK CONFIGURATION
# ----------------------------------------------------------------------
# hostname of server
TRAEFIK_HOST=Host:pecan-agu.ncsa.illinois.edu;
# IP ranges allowed to access the site. Note that 0.0.0.0/0 allows access
# from everywhere; remove it to restrict access to localhost and NCSA.
TRAEFIK_IPFILTER=172.16.0.0/12, 141.142.0.0/16, 0.0.0.0/0
# Run traefik on port 80 (http) and port 443 (https)
TRAEFIK_HTTP_PORT=80
TRAEFIK_HTTPS_PORT=443
TRAEFIK_HTTPS_OPTIONS=TLS
# enable SSL certificate generation
#TRAEFIK_ACME_ENABLE=true
# Use your real email address here to be notified when the cert expires
#TRAEFIK_ACME_EMAIL=pecanproj@gmail.com
# Always use https, traffic to http is redirected to https
#TRAEFIK_HTTP_REDIRECT=Redirect.EntryPoint:https
# ----------------------------------------------------------------------
# PEcAn CONFIGURATION
# ----------------------------------------------------------------------
# what version of PEcAn to use
PECAN_VERSION=develop
# ----------------------------------------------------------------------
# BETY CONFIGURATION
# ----------------------------------------------------------------------
# what version of BETY to use
BETY_VERSION=develop
# what is our server number, 99=vm, 98=docker
BETY_LOCAL_SERVER=98
# secret used to encrypt cookies in BETY
BETY_SECRET_KEY=-329i093043utirjdoij4iterfkdst04aweijgmda934wtjrfsdu3409wter
# ----------------------------------------------------------------------
# MINIO CONFIGURATION
# ----------------------------------------------------------------------
# minio username and password
MINIO_ACCESS_KEY=lala
MINIO_SECRET_KEY=lala
# ----------------------------------------------------------------------
# RABBITMQ CONFIGURATION
# ----------------------------------------------------------------------
# RabbitMQ username and password
RABBITMQ_DEFAULT_USER=lala
RABBITMQ_DEFAULT_PASS=lala
# create the correct URI with above username and password
RABBITMQ_URI=amqp://lala:lala@rabbitmq/%2F
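
All of the values above are consumed through ${...} variable substitution in the compose files. Before deploying, it can be useful to check what actually gets resolved (a sketch; it assumes the .env file sits in the same directory as docker-compose.yml and the override file shown next):

# render the merged configuration with all variables substituted
docker-compose --file docker-compose.yml --file docker-compose.override.yml config | less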
version: "3"
services:
  # webserver to handle all traffic. This can use Let's Encrypt to generate an SSL cert.
  traefik:
    image: traefik:latest
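    # the traefik dashboard is exposed on port 8080 (see the comments below)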
    ports:
      - 8080:8080
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
  # Allows viewing all running docker containers, restarting them and seeing log files.
  portainer:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # show docker swarm cluster stats, ip address below should point to the swarm master
  clusterstats:
    image: browndog/clusterstats:develop
    command: --swarm 192.168.5.202 --timeout 30
    networks:
      - pecan
    ports:
      - 9999:9999
    labels:
      - "traefik.enable=true"
      - "traefik.backend=clusterstats"
      - "traefik.port=9999"
      - "traefik.frontend.rule=${TRAEFIK_FRONTEND_RULE:-}PathPrefixStrip: /clusterstats"
      - "traefik.website.frontend.whiteList.sourceRange=${TRAEFIK_IPFILTER:-172.16.0.0/12}"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
    volumes:
      - clusterstats:/data
  # ----------------------------------------------------------------------
  # Access to the files generated and used by PEcAn, both through a
  # web interface (minio) as well as through the thredds server.
  # ----------------------------------------------------------------------
  # webserver to handle access to data
  minio:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # THREDDS data server
  thredds:
    deploy:
      mode: replicated
      replicas: 0
      placement:
        constraints:
          - node.role == worker
  # ----------------------------------------------------------------------
  # Job management system. Jobs are distributed through the message
  # system. PEcAn uses this to distribute the work/load across multiple
  # containers.
  # ----------------------------------------------------------------------
  # rabbitmq to connect to extractors
  rabbitmq:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # ----------------------------------------------------------------------
  # Database to hold the data from PEcAn and BETY.
  # ----------------------------------------------------------------------
  # postgresql + postgis to hold all the data
  postgres:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # ----------------------------------------------------------------------
  # BETY rails frontend to the database
  # ----------------------------------------------------------------------
  bety:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # ----------------------------------------------------------------------
  # PEcAn application
  # ----------------------------------------------------------------------
  # PEcAn documentation as well as PEcAn home page
  docs:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # PEcAn web front end, this is just the PHP code
  web:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # PEcAn model monitor
  monitor:
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  # PEcAn executor, executes jobs. Does not run the actual models.
  executor:
    deploy:
      mode: replicated
      replicas: 3
      placement:
        constraints:
          - node.role == worker
  # ----------------------------------------------------------------------
  # PEcAn models, list each model you want to run below
  # ----------------------------------------------------------------------
  # PEcAn sipnet model runner
  sipnet:
    deploy:
      mode: replicated
      replicas: 5
      placement:
        constraints:
          - node.role == worker
  # PEcAn ED model runner
  ed2:
    deploy:
      mode: replicated
      replicas: 5
      placement:
        constraints:
          - node.role == worker
  # PEcAn MAESPA model runner
  maespa:
    deploy:
      mode: replicated
      replicas: 5
      placement:
        constraints:
          - node.role == worker
# ----------------------------------------------------------------------
# Name of network to be used by all containers
# ----------------------------------------------------------------------
networks:
  pecan:
    attachable: true
# ----------------------------------------------------------------------
# Volumes used by the PEcAn stack. These volumes are used to make sure
# we have persistent data.
# ----------------------------------------------------------------------
volumes:
  clusterstats:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/clusterstats"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
  traefik:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/traefik"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
  postgres:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/postgres"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
  rabbitmq:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/rabbitmq"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
  pecan:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/pecan"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
  portainer:
    driver: local
    driver_opts:
      type: nfs
      device: ":/data/portainer"
      o: addr=192.168.5.202,nfsvers=3,nolock,soft,rw
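
All volumes are backed by NFS exports on 192.168.5.202, which must exist before the stack starts. A single volume definition can be tried out by hand outside the stack (a sketch using the same options as above; pecan-test is a throwaway name):

# create a standalone volume with the same NFS options as the stack
docker volume create --driver local \
  --opt type=nfs \
  --opt device=":/data/pecan" \
  --opt o=addr=192.168.5.202,nfsvers=3,nolock,soft,rw \
  pecan-test
# mount it in a short-lived container to confirm the export is reachable
docker run --rm -v pecan-test:/data alpine ls /data
docker volume rm pecan-test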
#!/bin/bash
# docker stack deploy does not read the .env file, so the variables have to
# be resolved first. The following throws an error in docker:
#export $(cat .env | grep ^[A-Z] | sed 's/, /,/g')
#docker stack deploy --compose-file docker-compose.yml --compose-file docker-compose.override.yml pecan
# Instead use docker-compose to render a fully substituted config file:
docker-compose --file docker-compose.yml --file docker-compose.override.yml config > stack.yml
docker stack deploy --compose-file stack.yml pecan
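
Once the stack is up, the replica counts from the compose file can be inspected and changed without redeploying (a sketch; service names follow docker's <stack>_<service> convention):

# list all services in the stack with their replica counts
docker stack services pecan
# scale the sipnet model runners from 5 to 10 replicas
docker service scale pecan_sipnet=10
# follow the logs of the executor service
docker service logs -f pecan_executor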
@robkooper (Author)
You can now use:
http://server:9998 for docker swarm information (admin/secret)
http://localhost:8080/dashboard/ for traefik (need to ssh to the machine)
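
Since port 8080 is not routed through traefik, reaching the dashboard remotely takes an ssh tunnel (a sketch; the hostname is the TRAEFIK_HOST value above and the user is a placeholder):

# forward local port 8080 to the traefik dashboard on the swarm host
ssh -L 8080:localhost:8080 user@pecan-agu.ncsa.illinois.edu
# then browse to http://localhost:8080/dashboard/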

@ashiklom
Awesome, thanks! The docker-compose config is a neat trick -- I was wondering about the best way to do that.

@ashiklom
So a handy thing I learned is that docker stack deploy -c can read from stdin. So I've been doing:

docker-compose config -p <project> | docker stack deploy -c - <project>
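
For the two compose files used in this gist, that presumably becomes (a sketch combining the stdin trick with the deploy script above):

docker-compose --file docker-compose.yml --file docker-compose.override.yml config | docker stack deploy --compose-file - pecan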
