Skip to content

Instantly share code, notes, and snippets.

@revant
Last active November 29, 2023 11:23
Show Gist options
  • Star 7 You must be signed in to star a gist
  • Fork 3 You must be signed in to fork a gist
  • Save revant/6a67f775aa8198f9b306f32d6872799f to your computer and use it in GitHub Desktop.
ERPNext on Docker Swarm

Follow these steps:

Install prerequisites:

# Host preparation: update the OS, enable unattended security upgrades,
# create an unprivileged "craft" user, and install Docker.
apt-get update && apt-get dist-upgrade -y
dpkg-reconfigure --priority=low unattended-upgrades
# Debian/Ubuntu adduser has no -D flag (that is BusyBox/Alpine syntax);
# create the user non-interactively with a disabled password instead.
adduser --disabled-password --gecos "" craft
usermod -aG sudo craft
curl -fsSL https://get.docker.com | bash
usermod -aG docker craft
su - craft
  • Install traefik and portainer using guide available at dockerswarm.rocks
    • Use local-traefik.yaml and local-portainer.yaml for local setup. Read comments in the file.
  • Add mariadb.yaml
  • Add erpnext-v13.yaml
  • Add configure-erpnext-v13.yaml
  • Add swarm-cron.yaml
  • Add erpnext-v13-backup.yaml
  • Add erpnext-v13-delete-old-backup.yaml
    • Needs delete-backups.py added as config delete-backups
---
# configure-erpnext-v13.yaml
# One-shot job: runs configure.py inside the worker image to write the
# DB/Redis connection settings into the shared sites volume.
version: '3.7'

services:
  configurator:
    image: frappe/erpnext-worker:${VERSION:?No image version set}
    volumes:
      - sites:/home/frappe/frappe-bench/sites
    command: configure.py
    deploy:
      restart_policy:
        # run once and exit; do not restart after the script finishes
        condition: none
    environment:
      # "mariadb_db" = service "db" of the stack deployed from mariadb.yaml
      DB_HOST: mariadb_db
      # quoted so the values stay strings instead of YAML integers
      DB_PORT: "3306"
      REDIS_CACHE: redis-cache:6379
      REDIS_QUEUE: redis-queue:6379
      REDIS_SOCKETIO: redis-socketio:6379
      SOCKETIO_PORT: "9000"

volumes:
  sites:
    external: true
    # set to name of the sites volume
    name: erpnext-v13_sites
#!/home/frappe/frappe-bench/env/bin/python
import argparse
import datetime
import boto3
def delete_files_before_datetime(
    bucket,
    before_number_of_days=3,
    region_name=None,
    api_version=None,
    use_ssl=True,
    verify=None,
    endpoint_url=None,
    aws_access_key_id=None,
    aws_secret_access_key=None,
    aws_session_token=None,
    config=None,
    prefix=None,
    dry_run=None,
):
    """Delete S3 objects under ``prefix`` older than ``before_number_of_days``.

    Args:
        bucket: Name of the S3 bucket to prune.
        before_number_of_days: Objects whose ``LastModified`` is earlier than
            this many days ago (UTC) are deleted.
        region_name, api_version, use_ssl, verify, endpoint_url,
        aws_access_key_id, aws_secret_access_key, aws_session_token, config:
            Passed straight through to ``boto3.client("s3", ...)``.
        prefix: Key prefix restricting the listing (one site's backups).
        dry_run: When truthy, only print the keys that would be deleted.
    """
    before_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(
        days=before_number_of_days
    )
    s3 = boto3.client(
        service_name="s3",
        region_name=region_name,
        api_version=api_version,
        use_ssl=use_ssl,
        verify=verify,
        endpoint_url=endpoint_url,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        config=config,
    )
    # Paginate instead of a single list_objects_v2 call: each response holds
    # at most 1000 keys, so old backups past the first page would otherwise
    # never be deleted.
    paginator = s3.get_paginator("list_objects_v2")
    keys_to_delete = [
        {"Key": obj["Key"]}
        for page in paginator.paginate(Bucket=bucket, Prefix=prefix)
        # "Contents" is absent from a page that matched no objects.
        for obj in page.get("Contents", [])
        if obj["LastModified"] < before_date
    ]
    print("Deleting files...")
    for key in keys_to_delete:
        print(key.get("Key"))
    # Skip the API call entirely when there is nothing to delete:
    # delete_objects rejects an empty Objects list.
    if not dry_run and keys_to_delete:
        # delete_objects accepts at most 1000 keys per request.
        for start in range(0, len(keys_to_delete), 1000):
            s3.delete_objects(
                Bucket=bucket,
                Delete={"Objects": keys_to_delete[start : start + 1000]},
            )
        print("Files deleted successfully")
def parse_args():
    """Collect S3 connection and retention settings from the command line."""
    # Declarative flag table keeps registration order (and thus --help
    # output) identical while avoiding repetitive add_argument calls.
    flag_table = [
        ("--prefix", {"required": True}),
        ("--bucket", {"required": True}),
        ("--before-days", {"required": True, "type": int}),
        ("--region", {"required": True}),
        ("--endpoint-url", {"required": True}),
        # Looking for default AWS credentials variables
        ("--aws-access-key-id", {"required": True}),
        ("--aws-secret-access-key", {"required": True}),
        ("--dry-run", {"action": "store_true"}),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in flag_table:
        cli.add_argument(flag, **options)
    return cli.parse_args()
if __name__ == "__main__":
    # Entry point: translate CLI flags into keyword arguments for the
    # deletion routine.
    cli_args = parse_args()
    delete_files_before_datetime(
        bucket=cli_args.bucket,
        prefix=cli_args.prefix,
        before_number_of_days=cli_args.before_days,
        region_name=cli_args.region,
        endpoint_url=cli_args.endpoint_url,
        aws_access_key_id=cli_args.aws_access_key_id,
        aws_secret_access_key=cli_args.aws_secret_access_key,
        dry_run=cli_args.dry_run,
    )
---
# erpnext-v13-backup.yaml: swarm-cronjob-driven service that backs up every
# site on the bench and pushes the archives to S3-compatible storage.
# Note: set VERSION and S3 credentials
version: "3.7"

services:
  backup:
    deploy:
      mode: replicated
      # 0 replicas: swarm-cronjob scales the service up on schedule.
      replicas: 0
      labels:
        - "swarm.cronjob.enable=true"
        # every 6 hours
        - "swarm.cronjob.schedule=0 */6 * * *"
        - "swarm.cronjob.skip-running=false"
      restart_policy:
        condition: none
    image: frappe/erpnext-worker:${VERSION:?No image version set}
    entrypoint: ["bash", "-c"]
    command:
      # $$ escapes $ from compose interpolation so the shell sees a single $.
      - |
        for SITE in $$(/home/frappe/frappe-bench/env/bin/python -c "import frappe;print(' '.join(frappe.utils.get_sites()))")
        do
          bench --site $$SITE backup --with-files
          push-backup \
            --site $$SITE \
            --bucket $$BUCKET_NAME \
            --region-name $$REGION \
            --endpoint-url $$ENDPOINT_URL \
            --aws-access-key-id $$ACCESS_KEY_ID \
            --aws-secret-access-key $$SECRET_ACCESS_KEY
        done
    environment:
      # NOTE(review): placeholder values — replace before deploying; prefer
      # Docker secrets over plain environment variables for the credentials.
      - BUCKET_NAME=bucketname
      - REGION=region
      - ACCESS_KEY_ID=access_key
      - SECRET_ACCESS_KEY=secret_access_key
      - ENDPOINT_URL=https://endpoint.url.com
    volumes:
      - "sites:/home/frappe/frappe-bench/sites"
    networks:
      - mariadb-network
      - erpnext-v13

networks:
  mariadb-network:
    external: true
    name: mariadb-network
  erpnext-v13:
    external: true
    # set to name of the bench network
    name: erpnext-v13

volumes:
  sites:
    external: true
    # set to name of the sites volume
    name: erpnext-v13_sites
---
# erpnext-v13-delete-old-backup.yaml: daily cron job that prunes backups
# older than the retention window from S3-compatible storage.
# Note: set VERSION and S3 credentials
version: "3.7"

services:
  backup:
    deploy:
      mode: replicated
      # 0 replicas: swarm-cronjob scales the service up on schedule.
      replicas: 0
      labels:
        - "swarm.cronjob.enable=true"
        # 6-field expression (with seconds): daily at midnight
        - "swarm.cronjob.schedule=0 0 0 * * *"
        - "swarm.cronjob.skip-running=false"
      restart_policy:
        condition: none
    image: frappe/erpnext-worker:${VERSION:?No image version set}
    entrypoint: ["bash", "-c"]
    command:
      # $$ escapes $ from compose interpolation so the shell sees a single $.
      # NOTE(review): placeholder bucket/credential values — replace before deploying.
      - |
        for SITE in $$(/home/frappe/frappe-bench/env/bin/python -c "import frappe;print(' '.join(frappe.utils.get_sites()))")
        do
          delete-backups \
            --bucket=bucketname \
            --before-days=3 \
            --region=region \
            --endpoint-url=https://endpoint.url.com \
            --aws-access-key-id=access_key \
            --aws-secret-access-key=secret_access_key \
            --prefix=$$SITE
        done
    volumes:
      - "sites:/home/frappe/frappe-bench/sites"
    networks:
      - mariadb-network
      - erpnext-v13
    configs:
      # delete-backups.py installed as an executable (octal 0550 = r-x for
      # owner/group) owned by the frappe user (uid/gid 1000)
      - source: delete-backups
        target: /usr/bin/delete-backups
        uid: '1000'
        gid: '1000'
        mode: 0550

networks:
  mariadb-network:
    external: true
    name: mariadb-network
  erpnext-v13:
    external: true
    # set to name of the bench network
    name: erpnext-v13

volumes:
  sites:
    external: true
    # set to name of the sites volume
    name: erpnext-v13_sites

configs:
  delete-backups:
    external: true
---
# erpnext-v13.yaml: Frappe/ERPNext v13 bench as a Docker Swarm stack
# (workers, nginx frontend, Redis instances, scheduler, socketio).
# Note: set VERSION, FRAPPE_VERSION and SITES
version: "3.7"

services:
  backend:
    image: frappe/erpnext-worker:${VERSION:?No image version set}
    deploy:
      restart_policy:
        condition: on-failure
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  frontend:
    image: frappe/erpnext-nginx:${VERSION}
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        traefik.docker.network: traefik-public
        traefik.enable: "true"
        traefik.constraint-label: traefik-public
        traefik.http.middlewares.prod-redirect.redirectscheme.scheme: https
        # Change router name prefix from erpnext-v13 to the name of stack in case of multi bench setup
        traefik.http.routers.erpnext-v13-http.rule: Host(${SITES:?No sites set})
        traefik.http.routers.erpnext-v13-http.entrypoints: http
        # Remove following lines in case of local setup
        traefik.http.routers.erpnext-v13-http.middlewares: prod-redirect
        traefik.http.routers.erpnext-v13-https.rule: Host(${SITES})
        traefik.http.routers.erpnext-v13-https.entrypoints: https
        traefik.http.routers.erpnext-v13-https.tls: "true"
        traefik.http.routers.erpnext-v13-https.tls.certresolver: le
        # Remove above lines in case of local setup
        # Uncomment and change domain for non-www to www redirect
        # traefik.http.routers.erpnext-v13-https.middlewares: nonwwwtowww
        # traefik.http.middlewares.nonwwwtowww.redirectregex.regex: ^https?://(?:www\.)?(.*)
        # traefik.http.middlewares.nonwwwtowww.redirectregex.replacement: https://www.$$1
        traefik.http.services.erpnext-v13.loadbalancer.server.port: "8080"
    environment:
      BACKEND: backend:8000
      # $$host is escaped so nginx receives the literal $host variable
      FRAPPE_SITE_NAME_HEADER: $$host
      SOCKETIO: websocket:9000
      UPSTREAM_REAL_IP_ADDRESS: 127.0.0.1
      UPSTREAM_REAL_IP_HEADER: X-Forwarded-For
      UPSTREAM_REAL_IP_RECURSIVE: "off"
    volumes:
      - sites:/usr/share/nginx/html/sites
      - assets:/usr/share/nginx/html/sites/assets
    networks:
      - mariadb-network
      - bench-network
      - traefik-public

  queue-default:
    image: frappe/erpnext-worker:${VERSION}
    deploy:
      restart_policy:
        condition: on-failure
    command:
      - bench
      - worker
      - --queue
      - default
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  queue-long:
    image: frappe/erpnext-worker:${VERSION}
    deploy:
      restart_policy:
        condition: on-failure
    command:
      - bench
      - worker
      - --queue
      - long
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  queue-short:
    image: frappe/erpnext-worker:${VERSION}
    deploy:
      restart_policy:
        condition: on-failure
    command:
      - bench
      - worker
      - --queue
      - short
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  redis-cache:
    image: redis:6.2-alpine
    deploy:
      restart_policy:
        condition: on-failure
    volumes:
      - redis-cache-data:/data
    networks:
      - bench-network

  redis-queue:
    image: redis:6.2-alpine
    deploy:
      restart_policy:
        condition: on-failure
    volumes:
      - redis-queue-data:/data
    networks:
      - bench-network

  redis-socketio:
    image: redis:6.2-alpine
    deploy:
      restart_policy:
        condition: on-failure
    volumes:
      - redis-socketio-data:/data
    networks:
      - bench-network

  scheduler:
    image: frappe/erpnext-worker:${VERSION}
    deploy:
      restart_policy:
        condition: on-failure
    command:
      - bench
      - schedule
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  websocket:
    image: frappe/frappe-socketio:${FRAPPE_VERSION:?No frappe image version set}
    deploy:
      restart_policy:
        condition: on-failure
    volumes:
      - sites:/home/frappe/frappe-bench/sites
      - assets:/home/frappe/frappe-bench/sites/assets
    networks:
      - mariadb-network
      - bench-network

  # Add migrate-sites-script.sh as migrate-sites-script config
  # and uncomment following to enable migration on stack (re)deployment.
  # migration:
  #   image: frappe/erpnext-worker:${VERSION}
  #   deploy:
  #     restart_policy:
  #       condition: none
  #   entrypoint: ["bash", "-c"]
  #   command:
  #     - |
  #       for SITE in $$(/home/frappe/frappe-bench/env/bin/python -c "import frappe;print(' '.join(frappe.utils.get_sites()))")
  #       do
  #         bench --site $$SITE set-config -p maintenance_mode 1
  #         bench --site $$SITE set-config -p pause_scheduler 1
  #         bench --site $$SITE migrate
  #         bench --site $$SITE set-config -p maintenance_mode 0
  #         bench --site $$SITE set-config -p pause_scheduler 0
  #       done
  #   volumes:
  #     - sites:/home/frappe/frappe-bench/sites
  #     - assets:/home/frappe/frappe-bench/sites/assets
  #   networks:
  #     - mariadb-network
  #     - bench-network

  # populate-assets:
  #   image: frappe/erpnext-nginx:${VERSION}
  #   deploy:
  #     restart_policy:
  #       condition: none
  #   entrypoint: ["sh", "-c"]
  #   command:
  #     - >
  #       cp -fR /usr/share/nginx/html/assets /data;
  #       touch /data/sites/.build;
  #   volumes:
  #     - assets:/data/assets:rw
  #     - sites:/data/sites:rw

volumes:
  assets:
  db-data:
  redis-cache-data:
  redis-queue-data:
  redis-socketio-data:
  sites:

networks:
  bench-network:
    # Change network name from erpnext-v13 to the name of stack in case of multi bench setup
    name: erpnext-v13
    external: false
  mariadb-network:
    name: mariadb-network
    external: true
  traefik-public:
    name: traefik-public
    external: true
---
# local-portainer.yaml: Portainer CE plus a global agent for local Swarm use.
# Execute following before deploying this compose yaml
# echo 'PORTAINER_DOMAIN=portainer.localhost' > .env
# env $(cat .env | xargs) docker stack deploy -c local-portainer.yaml portainer
version: '3.7'

services:
  agent:
    image: portainer/agent:2.11.1
    environment:
      AGENT_CLUSTER_ADDR: tasks.agent
    volumes:
      # the agent needs the Docker API and volume data on every node
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - agent-network
    deploy:
      # one agent task per node
      mode: global
      placement:
        constraints:
          - node.platform.os == linux

  portainer:
    image: portainer/portainer-ce:2.11.1
    command: -H tcp://tasks.agent:9001 --tlsskipverify
    volumes:
      - portainer-data:/data
    networks:
      - agent-network
      - traefik-public
    deploy:
      placement:
        constraints:
          - node.role == manager
      labels:
        - traefik.enable=true
        - traefik.docker.network=traefik-public
        - traefik.constraint-label=traefik-public
        - traefik.http.routers.portainer-http.rule=Host(`${PORTAINER_DOMAIN?Variable not set}`)
        - traefik.http.routers.portainer-http.entrypoints=http
        - traefik.http.services.portainer.loadbalancer.server.port=9000

networks:
  agent-network:
    attachable: true
  traefik-public:
    external: true

volumes:
  portainer-data:
---
# local-traefik.yaml: Traefik v2 reverse proxy for local Swarm use
# (HTTP only — no TLS certresolver is configured here).
# Execute following before deploying this compose yaml
# echo 'TRAEFIK_DOMAIN=traefik.localhost' > .env
# echo 'HASHED_PASSWORD='$(openssl passwd -apr1 changeit | sed 's/\$/\\\$/g') >> .env
# env $(cat .env | xargs) docker stack deploy -c local-traefik.yaml traefik
version: '3.7'

services:
  traefik:
    image: traefik:v2.6
    ports:
      # host mode publishes directly on the node, bypassing the ingress mesh
      # so client IPs are preserved
      - target: 80
        published: 80
        mode: host
      - target: 443
        published: 443
        mode: host
    deploy:
      labels:
        - traefik.enable=true
        - traefik.docker.network=traefik-public
        - traefik.constraint-label=traefik-public
        - traefik.http.middlewares.admin-auth.basicauth.users=admin:${HASHED_PASSWORD?Variable not set}
        # Uses the environment variable TRAEFIK_DOMAIN
        - traefik.http.routers.traefik-public-http.rule=Host(`${TRAEFIK_DOMAIN?Variable not set}`)
        - traefik.http.routers.traefik-public-http.entrypoints=http
        # Use the special Traefik service api@internal with the web UI/Dashboard
        - traefik.http.routers.traefik-public-http.service=api@internal
        - traefik.http.routers.traefik-public-http.middlewares=admin-auth
        # Define the port inside of the Docker service to use
        - traefik.http.services.traefik-public.loadbalancer.server.port=8080
    volumes:
      # read-only Docker API access for service discovery
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - traefik-public-certificates:/certificates
    command:
      - --providers.docker
      - --providers.docker.exposedbydefault=false
      - --providers.docker.swarmmode
      - --entrypoints.http.address=:80
      - --accesslog
      - --log
      - --api
    networks:
      - traefik-public

volumes:
  traefik-public-certificates:

networks:
  traefik-public:
    name: traefik-public
    external: false
---
# mariadb.yaml: shared MariaDB instance on an attachable overlay network.
# Note: set DB_PASSWORD
version: "3.7"

services:
  db:
    image: mariadb:10.6
    healthcheck:
      test: mysqladmin ping -h localhost --password=${DB_PASSWORD}
      interval: 1s
      retries: 15
    command:
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_unicode_ci
      - --skip-character-set-client-handshake
      # NOTE(review): presumably needed for Frappe's compressed row format
      # on MariaDB 10.6 — confirm against Frappe deployment docs
      - --skip-innodb-read-only-compressed
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:?No db password set}
    volumes:
      - db-data:/var/lib/mysql
    networks:
      - mariadb-network

volumes:
  db-data:

networks:
  mariadb-network:
    name: mariadb-network
    # attachable so services from other stacks (the benches) can join
    attachable: true
---
# swarm-cron.yaml: crazymax/swarm-cronjob watches service labels
# (swarm.cronjob.*) and scales the labelled services on schedule.
version: "3.2"

services:
  swarm-cronjob:
    image: crazymax/swarm-cronjob
    volumes:
      # Docker API access is required to scale target services
      - "/var/run/docker.sock:/var/run/docker.sock"
    environment:
      # timezone used to evaluate the cron expressions
      - "TZ=Asia/Kolkata"
      - "LOG_LEVEL=info"
      - "LOG_JSON=false"
    deploy:
      placement:
        constraints:
          # must run where the manager's Docker API is available
          - node.role == manager
@revant
Copy link
Author

revant commented Jan 25, 2023

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment