Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save heartshare/c49aee9c49f9c2cc9ca3d38fba420501 to your computer and use it in GitHub Desktop.
multi-node-docker-swarm-setup
# Not a production ready deployment solution.
# Shell commands to enable docker swarm on multiple Ubuntu nodes.
# Use these commands for reference.
# Copy ssh public key to all nodes
# Will ask for the root password on each node; once the SSH login succeeds, your SSH public key is installed for root on that node.
# Install our public key for root on every node (prompts for the root
# password once per node; key-based root ssh works afterwards).
for node in host1 host2 host3 host4; do
  ssh-copy-id "root@${node}"
done
# Install docker on every node via the official convenience script.
# The session already runs as root, so `sudo` is unnecessary here, and
# `$USER` inside the quoted heredoc would expand to root on the remote —
# adding root to the docker group is pointless (root can always use docker).
# The ubuntu user is added to the docker group in a later step instead.
for host in host1 host2 host3 host4
do
ssh root@"${host}" <<'ENDSSH'
set -xe
curl -fsSL https://get.docker.com -o get-docker.sh
sh get-docker.sh
# Clean up the installer so re-runs start fresh.
rm -f get-docker.sh
ENDSSH
done
# Sanity check: docker daemon answers on every node.
for node in host1 host2 host3 host4; do
  ssh "root@${node}" docker ps
done
# Either reuse the root .ssh directory or create separate ssh keys for the
# Ubuntu user. Create the ubuntu user on every node.
# The original interactive `adduser ubuntu` prompts for a password/GECOS over
# a non-tty ssh session and exits non-zero under `set -e` (the old comment
# even noted "User add fails here"). Create the account non-interactively and
# idempotently instead.
for host in host1 host2 host3 host4
do
ssh root@"${host}" <<'ENDSSH'
set -xe
id ubuntu >/dev/null 2>&1 || adduser --disabled-password --gecos "" ubuntu
ENDSSH
done
# Complete ubuntu user setup:
# - sudo + docker group membership
# - passwordless sudo
# - copy root's authorized ssh keys into the ubuntu home directory
for host in host1 host2 host3 host4
do
ssh root@"${host}" <<'ENDSSH'
set -xe
usermod -aG sudo ubuntu
gpasswd -a ubuntu docker
# Never overwrite /etc/sudoers itself — the original `> /etc/sudoers`
# clobbered the whole file, which can break sudo for every user. Use a
# drop-in under /etc/sudoers.d and validate it with visudo before relying on it.
echo "ubuntu ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/90-ubuntu-nopasswd
chmod 440 /etc/sudoers.d/90-ubuntu-nopasswd
visudo -cf /etc/sudoers.d/90-ubuntu-nopasswd
cp -r ~/.ssh /home/ubuntu
# `user:group` is the portable chown syntax (the `.` separator is deprecated).
chown -R ubuntu:ubuntu /home/ubuntu/.ssh
ENDSSH
done
# Confirm key-based ssh works for the ubuntu user on every node.
for node in host1 host2 host3 host4; do
  ssh "ubuntu@${node}" date
done
# Harden sshd on every node: forbid root login, empty passwords and
# password authentication (key-based login only from here on).
for host in host1 host2 host3 host4
do
ssh ubuntu@"${host}" <<'ENDSSH'
# Each option gets the same treatment: uncomment the directive if needed
# and force its value to "no". The generated sed expressions are identical
# to spelling out the three commands by hand.
for opt in PermitRootLogin PermitEmptyPasswords PasswordAuthentication
do
  sudo sed -i "s/#\?\(${opt}\s*\).*$/\1 no/" /etc/ssh/sshd_config
done
sudo service ssh reload
ENDSSH
done
# Optional step.
# We store docker swarm manager and worker join tokens in AWS SSM / Secrets
# Manager. If a secrets store is not used, skip all AWS steps.
# Install the AWS CLI v2 on every node.
# AWS CLI v2 ships as a self-contained bundle: no Python, PPAs or build
# toolchain are needed. The original block unzipped awscli-bundle.zip before
# anything had downloaded it (guaranteed failure), repeated the deadsnakes
# PPA/python install three times, and ran `apt upgrade` without -y, which
# hangs a non-interactive session.
for host in host1 host2 host3 host4
do
ssh ubuntu@"${host}" <<'ENDSSH'
set -xe
sudo apt update
sudo apt install -y curl unzip
curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o awscliv2.zip
unzip -o awscliv2.zip
# --update makes the install idempotent when this script is re-run.
sudo ./aws/install --update
rm -rf awscliv2.zip aws
aws --version
ENDSSH
done
# Docker logs fill up the disk, so set a reasonable rotation policy.
# Rotates container logs daily, keeping one archive; change `rotate 1`
# to keep more history.
for host in host1 host2 host3 host4
do
ssh ubuntu@"${host}" <<'ENDSSH'
sudo tee /etc/logrotate.d/docker-container >/dev/null <<'EOF'
/var/lib/docker/containers/*/*.log {
rotate 1
daily
compress
missingok
delaycompress
copytruncate
}
EOF
ENDSSH
done
# For Ubuntu, enable the firewall.
# 10.0.0.x IPs are the node-internal IPs; all traffic between swarm nodes
# is allowed so the swarm control and data planes work.
for host in host1 host2 host3 host4
do
ssh ubuntu@"${host}" <<'ENDSSH'
sudo ufw status
# Allow SSH explicitly first: with `default deny incoming`, enabling ufw
# without this rule locks out any admin machine that is not in the
# 10.0.0.x allow-list below.
sudo ufw allow 22/tcp
sudo ufw allow from 10.0.0.1
sudo ufw allow from 10.0.0.2
sudo ufw allow from 10.0.0.3
sudo ufw allow from 10.0.0.4
sudo ufw allow from 10.0.0.5
sudo ufw default deny incoming
# `yes |` answers the "may disrupt existing ssh connections" prompt.
yes | sudo ufw enable
sudo ufw status numbered
ENDSSH
done
# Distribute AWS credentials to each node.
# If the instances run in AWS, prefer instance roles over copied credentials.
for node in host1 host2 host3 host4; do
  scp -r ~/.aws "ubuntu@${node}:~/"
done
# Update /etc/hosts on each node so the nodes can resolve host names to
# internal IPs. Each append is guarded so re-running the script does not
# accumulate duplicate entries (the original appended unconditionally).
# NOTE(review): host3 -> 10.0.0.5 and host4 -> 10.0.0.4 look swapped relative
# to the sequential numbering — confirm against the real inventory before
# changing; the original used the same mapping.
for host in host1 host2 host3 host4
do
ssh ubuntu@"${host}" <<'ENDSSH'
add_host_entry() {
  grep -qxF "${1} ${2}" /etc/hosts || echo "${1} ${2}" | sudo tee -a /etc/hosts >/dev/null
}
add_host_entry 10.0.0.2 host1
add_host_entry 10.0.0.3 host2
add_host_entry 10.0.0.5 host3
add_host_entry 10.0.0.4 host4
ENDSSH
done
# Initialize docker swarm on the first node.
# Provide the internal IP where docker swarm will listen for swarm joins.
ssh ubuntu@host1 "docker swarm leave --force; docker swarm init --advertise-addr 10.0.0.2"
# Save the join tokens to AWS SSM.
# The tokens must be read on the swarm manager (host1) over ssh — the
# original ran `docker swarm join-token` locally on the admin machine,
# where no swarm exists, so the put-parameter value was empty/failed.
# NOTE(review): the parameter name "docker-manger-join-token" is misspelled
# but is read back with the same spelling later — keep both in sync if renamed.
MANAGER_TOKEN=$(ssh ubuntu@host1 "docker swarm join-token manager -q")
WORKER_TOKEN=$(ssh ubuntu@host1 "docker swarm join-token worker -q")
aws ssm put-parameter --name "docker-manger-join-token" --type "SecureString" --value "${MANAGER_TOKEN}" --overwrite
aws ssm put-parameter --name "docker-worker-join-token" --type "SecureString" --value "${WORKER_TOKEN}" --overwrite
# Join the rest of the nodes as managers to the swarm.
# Use docker-worker-join-token instead if the other nodes should join as workers.
# Fetch the token once before the loop — it is loop-invariant; the original
# made a redundant SSM call for every node.
MANAGER_JOIN_TOKEN=$(aws ssm get-parameters --name docker-manger-join-token --with-decryption --query "Parameters[*].{Value:Value}" --output text)
for host in host2 host3 host4
do
# Leave any previous swarm, then rejoin via the manager endpoint on host1.
# host1:2377 belongs inside the quoted remote command — the original passed
# it as a separate ssh argument and only worked because ssh concatenates its
# arguments into one remote command line.
ssh "ubuntu@${host}" "docker swarm leave --force"
ssh "ubuntu@${host}" "docker swarm join --token ${MANAGER_JOIN_TOKEN} host1:2377"
done
# SSH into the first node and verify all nodes have joined the cluster
ubuntu@host1:~$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
sr0n71ax6l6wi6ldfi1u5bhov host1 Ready Active Reachable 24.0.3
9nldwnbkommiaampds9odo49c host2 Ready Active Reachable 24.0.3
kq0o1tuvpyw0040w0hsav90wl host3 Ready Active Reachable 24.0.3
pmgkb7agfbkfhfxqjsa38xsim * host4 Ready Active Leader 24.0.3
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment