Created
May 11, 2018 01:37
-
-
Save rcbop/c32c9684bb08d293195d0a04c30996c2 to your computer and use it in GitHub Desktop.
Creates a Docker Swarm cluster on AWS EC2.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#/ Description:
#/ Creates a docker swarm ec2 cluster
#/ Examples:
#/ DEBUG=true ./create-ec2-swarm-cluster.sh (Enable debug messages)
#/ NO_COLORS=true ./create-ec2-swarm-cluster.sh (Disable colors)
#/ --------------------------------------------------------------------------------
#/ Author: Rogério Castelo Branco Peixoto (rcbpeixoto@gmail.com)
#/ --------------------------------------------------------------------------------
# Print the '#/'-prefixed header lines above as help text, then exit 0.
usage() { grep '^#/' "$0" | cut -c4- ; exit 0 ; }
# Show the usage text when any argument contains --help.
expr "$*" : ".*--help" > /dev/null && usage
# Directory containing this script (used to locate sibling ansible/docker dirs).
BASEDIR=$(dirname "$0")

# --- docker-machine parameters (all overridable via environment) ---
DRIVER=${DRIVER:-'amazonec2'}
VPC_ID=${VPC_ID:-'vpc-123456'}          # overwritten when create_vpc runs
SUBNET_ID=${SUBNET_ID:-'subnet-123456'} # overwritten when create_subnet_in_vpc runs
AV_ZONE=${AV_ZONE:-'c'}
INSTANCE_TYPE=${INSTANCE_TYPE:-'t2.large'}
KEY_PAIR_NAME=${KEY_PAIR_NAME:-'dev'}
KEY_PAIR_PATH=${KEY_PAIR_PATH:-'path-to-key.pem'}
INSTANCE_SIZE=${INSTANCE_SIZE:-'100'} # root storage in Gb
SECGROUP_NAME=${SECGROUP_NAME:-'my-swarm-sg'}
EC2_AMI_DEFAULT_USER=${EC2_AMI_DEFAULT_USER:-'ubuntu'}
# AWS_AMI=${AWS_AMI:-'ami-1853ac65'} # Amazon Linux AMI 2017.09.1 (HVM), SSD Volume Type

# --- AWS CLI parameters ---
AWS_TAGS=${AWS_TAGS:-'Name,swarm-cluster'}
AWS_PROFILE=${AWS_PROFILE:-'swarm-admin'}
AWS_REGION=${AWS_REGION:-'us-east-1'}

# --- swarm nodes ---
MANAGER_NAME=${MANAGER_NAME:-'swarm-manager01'}
declare -a WORKERS_ARRAY=("swarm-worker01" "swarm-worker02")
# protocol:port pairs opened between cluster members for swarm traffic
declare -a PORTS_TO_OPEN_ARRAY=("tcp:2377" "tcp:7946" "udp:7946" "tcp:4789" "udp:4789")

# --- DNS info ---
ROUTE_53_DOMAIN=${ROUTE_53_DOMAIN:-'mydomain.net'}
ROUTE_53_SUBDOMAIN=${ROUTE_53_SUBDOMAIN:-'swarmdev'}

# --- VPC ---
VPC_IPV4_CIDR=${VPC_IPV4_CIDR:-'172.32.0.0/24'}
TAG_PERFIX=${TAG_PERFIX:-'swarm'} # NOTE: misspelled name ("PERFIX") kept — referenced throughout the script
AWS_REGISTRY_ID=${AWS_REGISTRY_ID:-'xxxxx'}
AWS_CLI_USER=${AWS_CLI_USER:-'root'}
AWS_KEY_ID=${AWS_KEY_ID:-''}
AWS_SECRET_KEY=${AWS_SECRET_KEY:-''}
# (duplicate AWS_REGION default removed; it is set once above)

IFS=' '  # split on spaces by default; some loops temporarily use IFS=$'\n'
STEP=0   # incremented by bump_step for numbered progress logging
# Advance the global step counter and log a numbered [INFO] progress line.
bump_step(){
  STEP=$((STEP + 1))
  log "${BLU}[INFO] ($STEP) $1${NC}"
}
# Timestamped logger; 'echo -e' renders the literal ANSI escape sequences
# stored in the color variables (set_colors defines them as plain "\033..." text).
log() { echo -e "${BWHT}["$(date "+%Y%m%d${NC}T${BWHT}%H%M%S")"]${NC} $*"; }
# Log a 105-character '#' ruler line.
separator() { SEP=$(printf '%*s' 105 | tr ' ' '#') && log "${GRN}[INFO] $SEP${NC}"; }
info() { log "${GRN}[INFO] $1${NC}"; }
warning() { log "${YEL}[WARN] $1${NC}"; }
error() { log "${RED}[ERROR] $1${NC}"; }
# Log a fatal message, then terminate the whole script with status 1.
fatal() { log "${MAG}[FATAL] $1${NC}"; exit 1 ; }
# Log only when DEBUG=true; FUNCNAME[1] names the calling function.
debug() { if [ "${DEBUG}" == "true" ]; then log "${CYN}[DEBUG] :: ${FUNCNAME[1]} :: $1 ${NC}"; fi }
# JSON-encode a string (wrap in quotes, escape special characters) via Python.
#   $1 - string to encode; result is written to stdout
json_escape () {
  # Quote "$1": the previous unquoted form was word-split and glob-expanded,
  # collapsing internal whitespace before it ever reached python.
  printf '%s' "$1" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
}
# Log each line of a multi-line string at DEBUG level (no-op unless DEBUG=true).
#   $1 - multi-line text
multi_debug() {
  debug
  if [ "${DEBUG}" == "true" ]; then
    local IFS=$'\n'  # split on newlines only; 'local' keeps the caller's IFS intact (was leaked before)
    local line
    for line in $1
    do
      DATE=$(date "+%Y%m%dT%H%M%S")
      printf "[$DATE] [DEBUG] :: ${FUNCNAME[1]} :: $line\n"
    done
  fi
}
# Log each line of a multi-line string at INFO level.
#   $1 - multi-line text
multi_info() {
  local IFS=$'\n'  # split on newlines only; 'local' keeps the caller's IFS intact (was leaked before)
  local line
  for line in $1
  do
    DATE=$(date "+%Y%m%dT%H%M%S")
    printf "[$DATE] [INFO] :: ${FUNCNAME[1]} :: $line\n"
  done
}
# True when the NO_COLORS environment variable is set to "true".
is_no_colors(){
  debug
  [ "${NO_COLORS:-}" == "true" ]
}
# Define the ANSI color variables consumed by the log helpers. The values
# are stored as literal "\033..." text; log's 'echo -e' renders them.
set_colors(){
  debug
  export RED="\033[0;31m" GRN="\033[0;32m" BLU="\033[0;34m" CYN="\033[0;36m"
  export YEL="\033[33;m" MAG="\033[35m" BWHT="\033[1m" NC="\033[0m"
}
# Blank out every color variable so log output is plain text.
unset_colors(){
  debug
  export RED='' GRN='' BLU='' CYN='' YEL='' MAG='' BWHT='' NC=''
}
# Verify python is on PATH; abort the script when missing (no auto-install).
check_python(){
  debug
  info "Checking Python"
  if ! command -v python &>/dev/null; then
    error "Python is not installed"
    fatal "Please install python manually, exiting..."
  else
    pyversion=$( { python --version; } 2>&1 )
    info "$pyversion"
    info "Python installed!"
  fi
}
# Verify pip is on PATH; attempt a get-pip.py bootstrap install when missing.
check_pip(){
  debug
  info "Checking Pip"
  if ! command -v pip &>/dev/null; then
    error "Pip is not installed"
    warning "Attempting to install pip..."
    curl -L https://bootstrap.pypa.io/get-pip.py | python
    [ $? != 0 ] && fatal "Error installing pip, please install pip manually, exiting..."
  else
    pipversion=$(pip --version )
    info "$pipversion"
    info "Pip installed!"
  fi
}
# Verify yq is on PATH; attempt a pip install when missing.
check_yq(){
  debug
  info "Checking yq (.yml CLI processor)"
  if ! command -v yq &>/dev/null; then
    error "yq is not installed"
    warning "Attempting to install using pip..."
    pip install yq
    [ $? != 0 ] && fatal "Error installing yq, please install yq manually, exiting..."
  else
    yqversion=$(yq --version 2>&1)
    info "$yqversion"
    info "yq installed!"
  fi
}
# Verify jq is on PATH; attempt a pip install when missing.
# BUG FIX: this function previously probed and reported 'yq' instead of 'jq'.
check_jq(){
  debug
  info "Checking jq (JSON CLI processor)"
  if command -v jq &>/dev/null; then
    jqversion=$(jq --version 2>&1)
    info "$jqversion"
    info "jq installed!"
  else
    error "jq is not installed"
    warning "Attempting to install using pip..."
    pip install jq
    [ $? != 0 ] && fatal "Error installing jq, please install jq manually, exiting..."
  fi
}
# Verify the AWS CLI is on PATH; attempt a pip install when missing.
check_aws_cli(){
  debug
  info "Checking AWS CLI"
  if ! command -v aws &>/dev/null; then
    error "AWS CLI is not installed"
    warning "Attempting to install AWS CLI..."
    pip install awscli
    [ $? != 0 ] && fatal "Error installing awscli, please install it manually, exiting..."
  else
    # unset AWS_PROFILE inside the subshell: --version must not require a valid profile
    awsversion=$(unset AWS_PROFILE && aws --version)
    info "$(echo "$awsversion" | tr -d '\n')"
    info "AWS CLI installed!"
  fi
}
# Verify ansible is on PATH; attempt a pip install when missing.
check_ansible(){
  debug
  info "Checking ansible"
  if ! command -v ansible &>/dev/null; then
    error "ANSIBLE is not installed"
    warning "Attempting to install ANSIBLE..."
    pip install ansible
    [ $? != 0 ] && fatal "Error installing ansible, please install it manually, exiting..."
  else
    ansible_version=$(ansible --version)
    info "$(echo "$ansible_version" | tr -d '\n')"
    info "ANSIBLE installed!"
  fi
}
# Fallback: download and run the AWS CLI bundled installer into /usr/local
# (requires write permission there). Cleans up its temp dir afterwards.
manual_install_aws_cli(){
  debug
  # BUG FIX: ORIGIN was never defined, so the final 'cd "$ORIGIN"' was a
  # no-op and 'rm -rf "$TMP_DIR"' ran from inside the temp dir itself.
  local ORIGIN
  ORIGIN=$(pwd)
  TMP_DIR=awstmp
  mkdir "$TMP_DIR" && cd "$TMP_DIR" || return 1
  curl -o awscli.zip https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
  unzip awscli.zip
  ./awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
  cd "$ORIGIN" || return 1
  rm -rf "$TMP_DIR"
}
# Create the VPC, abort if creation failed, then tag it.
# Reads: VPC_IPV4_CIDR, AWS_PROFILE, AWS_REGION, TAG_PERFIX  Sets: VPC_ID
create_vpc(){
  debug
  info "Creating VPC"
  VPC_ID_DOC=$(aws ec2 create-vpc --cidr-block "${VPC_IPV4_CIDR}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}")
  VPC_ID=$(echo $VPC_ID_DOC | jq -r '.Vpc.VpcId')
  # Validate BEFORE tagging (the tag call previously ran even on failure)
  [ -z "$VPC_ID" ] && fatal 'Error creating vpc'
  # create-tags now carries the same profile/region as the create call
  aws ec2 create-tags --tags "Key=Name,Value=${TAG_PERFIX}-vpc" --resources "${VPC_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
  info "VPC created id: ${VPC_ID}"
}
# Create a subnet inside the VPC in the chosen availability zone.
# Reads: AWS_REGION, AV_ZONE, VPC_ID, VPC_IPV4_CIDR, AWS_PROFILE  Sets: SUBNET_ID
create_subnet_in_vpc(){
  debug
  info "Creating subnet ipv4 CIDR ${VPC_IPV4_CIDR}"
  SUBNET_DOC=$(aws ec2 create-subnet --availability-zone "${AWS_REGION}${AV_ZONE}" --vpc-id "${VPC_ID}" --cidr-block "${VPC_IPV4_CIDR}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}")
  SUBNET_ID=$(echo $SUBNET_DOC | jq -r '.Subnet.SubnetId')
  # Fail fast when no subnet id came back (jq emits the literal "null"
  # when the key is missing); previously errors went undetected.
  if [ -z "$SUBNET_ID" ] || [ "$SUBNET_ID" == "null" ]; then
    fatal 'Error creating subnet'
  fi
  info "Subnet created id: ${SUBNET_ID}"
}
# Generate a new EC2 key pair and store the private key locally, read-only.
create_key_pair(){
  debug
  KEY_PAIR_CREATED_FILE=${KEY_PAIR_CREATED_FILE:-'ec2-swarm-cluster.pem'}
  KEY_PAIR_CREATED_NAME=${KEY_PAIR_CREATED_NAME:-'ec2-swarm-cluster'}
  aws ec2 create-key-pair \
    --key-name "${KEY_PAIR_CREATED_NAME}" \
    --query 'KeyMaterial' \
    --output text \
    --profile "${AWS_PROFILE}" \
    --region "${AWS_REGION}" > "${KEY_PAIR_CREATED_FILE}"
  chmod 400 "${KEY_PAIR_CREATED_FILE}"
}
# Create and attach an internet gateway to the VPC, build a route table with
# a default route (0.0.0.0/0) through it, and associate that table with the
# VPC's subnet so instances get outbound internet access.
# Reads: VPC_ID, TAG_PERFIX, AWS_PROFILE, AWS_REGION
# Sets:  INTERNET_GATEWAY_ID, ROUTES_TABLE_ID, SUBNET_ID, ROUTE_TABLE_ASSOCIATION_ID
create_internet_gateway_vpc(){
  debug
  info "Creating internet gateway"
  INTERNET_GATEWAY_DOC=$(aws ec2 create-internet-gateway --profile "${AWS_PROFILE}" --region "${AWS_REGION}")
  INTERNET_GATEWAY_ID=$(echo $INTERNET_GATEWAY_DOC | jq -r '.InternetGateway.InternetGatewayId' )
  [ -z "$INTERNET_GATEWAY_ID" ] && fatal 'Error creating internet gateway'
  info "Internet Gateway ID: $INTERNET_GATEWAY_ID"
  # NOTE(review): this create-tags call has no --profile/--region, so it runs
  # against the default CLI profile/region — confirm it targets the right account.
  aws ec2 create-tags --tags "Key=Name,Value=${TAG_PERFIX}-internet" --resources "${INTERNET_GATEWAY_ID}"
  info "Attach internet gateway to VPC"
  aws ec2 attach-internet-gateway \
  --vpc-id "${VPC_ID}" \
  --internet-gateway-id "${INTERNET_GATEWAY_ID}" \
  --region "${AWS_REGION}" \
  --profile ${AWS_PROFILE}
  info "Creating routes table"
  ROUTES_TABLE_DOC=$(aws ec2 create-route-table --vpc-id "${VPC_ID}" --region "${AWS_REGION}" --profile ${AWS_PROFILE})
  ROUTES_TABLE_ID=$(echo $ROUTES_TABLE_DOC | jq -r '.RouteTable.RouteTableId')
  [ -z "$ROUTES_TABLE_ID" ] && fatal 'Error creating routes table'
  # NOTE(review): same missing --profile/--region as the tag call above.
  aws ec2 create-tags --tags "Key=Name,Value=${TAG_PERFIX}-routes" --resources "${ROUTES_TABLE_ID}"
  info "Creating route to redirect all traffic (0.0.0.0/0) to internet gateway"
  aws ec2 create-route --route-table-id "${ROUTES_TABLE_ID}" \
  --destination-cidr-block 0.0.0.0/0 \
  --gateway-id "${INTERNET_GATEWAY_ID}" \
  --profile "${AWS_PROFILE}" \
  --region "${AWS_REGION}"
  info "Listing routes tables"
  aws ec2 describe-route-tables --route-table-id "${ROUTES_TABLE_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
  info "Listing subnets"
  # Overwrites the global SUBNET_ID with the first subnet found in the VPC
  SUBNET_ID=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${VPC_ID}" --query 'Subnets[*].{ID:SubnetId,CIDR:CidrBlock}' --profile "${AWS_PROFILE}" --region "${AWS_REGION}" | jq -r '.[0].ID')
  info "Subnet ID: ${SUBNET_ID}"
  info "Associate route table to subnet"
  ROUTE_TABLE_ASSOCIATION_DOC=$(aws ec2 associate-route-table --subnet-id "${SUBNET_ID}" --route-table-id "${ROUTES_TABLE_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}")
  ROUTE_TABLE_ASSOCIATION_ID=$(echo $ROUTE_TABLE_ASSOCIATION_DOC | jq -r '.AssociationId')
  info "Route table association ID: ${ROUTE_TABLE_ASSOCIATION_ID}"
}
# Provision one EC2 instance via docker-machine using the globally
# configured VPC/subnet/keypair/security-group settings.
#   $1 - docker-machine name for the instance
#   $2 - AWS tags (comma-separated key,value list)
create_ec2_instance(){
  debug
  local INSTANCE_NAME=$1
  local AWS_TAGS=$2
  info "Executing EC2 docker-machine creation command for :: ${INSTANCE_NAME}"
  local machine_opts=(
    --driver "${DRIVER}"
    --amazonec2-tags "${AWS_TAGS}"
    --amazonec2-vpc-id "${VPC_ID}"
    --amazonec2-subnet-id "${SUBNET_ID}"
    --amazonec2-region "${AWS_REGION}"
    --amazonec2-zone "${AV_ZONE}"
    --amazonec2-keypair-name "${KEY_PAIR_NAME}"
    --amazonec2-ssh-keypath "${KEY_PAIR_PATH}"
    --amazonec2-instance-type "${INSTANCE_TYPE}"
    --amazonec2-root-size "${INSTANCE_SIZE}"
    --amazonec2-security-group "${SECGROUP_NAME}"
  )
  set -x
  docker-machine create "${machine_opts[@]}" "${INSTANCE_NAME}"
  set +x
}
# Sleep for the requested number of seconds, announcing the pause first.
#   $1 - seconds to wait
wait_for(){
  debug
  local TIME_TO_WAIT=$1
  warning "Waiting $TIME_TO_WAIT seconds"
  sleep "${TIME_TO_WAIT}"
}
# Allocate a new elastic IP. Sets ELASTIC_IP and ELASTIC_IP_ALLOCATION_ID.
allocate_elastic_ip(){
  debug
  ELASTIC_IP_DOC=$(aws ec2 allocate-address --output json --region "${AWS_REGION}" --profile "${AWS_PROFILE}")
  ELASTIC_IP=$(echo $ELASTIC_IP_DOC | jq -r '.PublicIp')
  ELASTIC_IP_ALLOCATION_ID=$(echo $ELASTIC_IP_DOC | jq -r '.AllocationId')
  # Quote the variable (the previous unquoted [ -z ] test misbehaved on empty
  # values) and also reject jq's literal "null" output for a missing key.
  if [ -z "${ELASTIC_IP_ALLOCATION_ID}" ] || [ "${ELASTIC_IP_ALLOCATION_ID}" == "null" ]; then
    fatal "ERROR creating elastic ip"
  fi
  info "Elastic ip created ID: ${ELASTIC_IP_ALLOCATION_ID}"
}
# Attach the previously allocated elastic IP to the named node's EC2 instance.
#   $1 - docker-machine node name
# Reads: ELASTIC_IP_ALLOCATION_ID  Sets: EC2_INSTANCE_ID, ELASTIC_IP_ASSOCIATION_ID
associate_elastic_ip(){
  debug
  local NODE_NAME=$1
  info "Associate elastic ip"
  EC2_INSTANCE_ID=$(docker-machine inspect "${NODE_NAME}" | jq -r '.Driver.InstanceId')
  ELASTIC_IP_ASSOCIATION_DOC=$(aws ec2 associate-address --instance-id "${EC2_INSTANCE_ID}" --allocation-id "${ELASTIC_IP_ALLOCATION_ID}" --region "${AWS_REGION}" --profile "${AWS_PROFILE}")
  ELASTIC_IP_ASSOCIATION_ID=$(echo $ELASTIC_IP_ASSOCIATION_DOC | jq -r '.AssociationId')
  info "Elastic ip address association ID: ${ELASTIC_IP_ASSOCIATION_ID}"
}
# Log the node's eth0 interface details and record its docker-machine IP.
#   $1 - docker-machine node name
# Sets: NODE_IP_ADDRESS
get_inet_interface_ip(){
  debug
  local NODE_NAME=$1
  info "ifconfig interface info"
  info
  # BUG FIX: quote the command substitution — unquoted, the multi-line
  # ifconfig output was word-split and multi_info received only its first word.
  multi_info "$(docker-machine ssh "${NODE_NAME}" ifconfig eth0)"
  NODE_IP_ADDRESS=$(docker-machine ip "${NODE_NAME}")
}
# Look up the Route 53 hosted zone id for a domain.
#   $1 - domain name (without trailing dot)
# Sets: HOSTED_ZONE_ID
get_hosted_zone_id(){
  debug
  local DOMAIN=$1
  HOSTED_ZONE_DOC=$(aws route53 list-hosted-zones --profile ${AWS_PROFILE})
  HOSTED_ZONE_ID_STR=$(echo $HOSTED_ZONE_DOC | jq -r ".HostedZones[] | select(.Name==\"$DOMAIN.\").Id")
  # Strip the "/hostedzone/" prefix; parameter expansion is more robust than
  # the previous hard-coded ":12" character offset.
  HOSTED_ZONE_ID=${HOSTED_ZONE_ID_STR##*/}
}
# Create an A record ($1.$2 -> ELASTIC_IP, TTL 300) in the Route 53 hosted
# zone, unless a record set with that fully-qualified name already exists.
#   $1 - subdomain label   $2 - domain
# Reads: ELASTIC_IP, HOSTED_ZONE_ID, AWS_PROFILE
create_registry_set_in_hosted_zone(){
  debug
  local SUBDOMAIN=$1
  local DOMAIN=$2
  info "Creating registry set in Route 53"
  # Change batch (Action CREATE) built inline; Value is the manager's elastic IP
  DNS_DOC="{ \"Comment\": \"Automatically created\", \"Changes\":[{ \"Action\": \"CREATE\", \"ResourceRecordSet\": {\"Name\": \"$SUBDOMAIN.$DOMAIN\", \"Type\": \"A\", \"TTL\": 300,\"ResourceRecords\": [{ \"Value\": \"$ELASTIC_IP\" }]}}]}"
  # Probe for an existing record set with the same name (note trailing dot)
  RESOURCE_RECORD_SET=$(aws route53 list-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --profile "${AWS_PROFILE}" | jq -r ".ResourceRecordSets[] | select(.Name==\"$SUBDOMAIN.$DOMAIN.\")")
  if [ -z "$RESOURCE_RECORD_SET" ]; then
    info "CREATING RECORD SET..."
    DNS_REGISTRY_SET_DOC=$(aws route53 change-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --change-batch "$DNS_DOC" --profile "${AWS_PROFILE}")
    DNS_REGISTRY_ENTRY_ID_STR=$(echo $DNS_REGISTRY_SET_DOC | jq -r '.ChangeInfo.Id')
    # Strip the leading "/change/" prefix (8 characters) from the change id
    DNS_REGISTRY_ENTRY_ID=${DNS_REGISTRY_ENTRY_ID_STR:8}
    info "DNS registry set entry id created: $DNS_REGISTRY_ENTRY_ID"
  else
    warning "RECORD SET ALREADY CREATED..."
    warning
    # Pretty-print the existing record set for the operator
    echo $RESOURCE_RECORD_SET | jq
  fi
}
# Fetch the named security group's description and record its id plus
# whether inbound rules for ports 22/80/443 already exist (these globals
# are consumed by the open_*_port_in_security_group helpers).
#   $1 - security group name
# Sets: SECURITY_GROUP_DOC, SECURITY_GROUP_ID, INBOUND_PORT_22, INBOUND_PORT_80, INBOUND_PORT_443
get_aws_security_group(){
  debug
  local SECURITY_GROUP_NAME=$1
  info "Getting ${MANAGER_NAME} security group information"
  SECURITY_GROUP_DOC=$(aws ec2 describe-security-groups --region "${AWS_REGION}" --profile "${AWS_PROFILE}" | jq -r ".SecurityGroups[] | select(.GroupName==\"$SECURITY_GROUP_NAME\")")
  SECURITY_GROUP_ID=$(echo $SECURITY_GROUP_DOC | jq -r ".GroupId")
  info "Security group id ${SECURITY_GROUP_ID}"
  # Each is non-empty when a rule with the matching FromPort already exists
  INBOUND_PORT_22=$(echo $SECURITY_GROUP_DOC | jq -r '.IpPermissions[] | select(.FromPort==22)')
  INBOUND_PORT_80=$(echo $SECURITY_GROUP_DOC | jq -r '.IpPermissions[] | select(.FromPort==80)')
  INBOUND_PORT_443=$(echo $SECURITY_GROUP_DOC | jq -r '.IpPermissions[] | select(.FromPort==443)')
}
# Ensure inbound HTTP (80) plus app ports 8080/8081 from anywhere exist in
# the security group.
# NOTE(review): the $1 the caller passes is ignored — this function reads the
# SECURITY_GROUP_ID / INBOUND_PORT_80 globals set by get_aws_security_group.
# NOTE(review): only port 80 is probed; 8080 and 8081 are added (or skipped)
# together with it without their own checks — confirm intent.
open_http_port_in_security_group(){
  debug
  if [ ! -z "$INBOUND_PORT_80" ]; then
    info "port 80 found in docker-machine security group"
  else
    warning "port 80 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
    --protocol "tcp" \
    --port "80" \
    --cidr "0.0.0.0/0" \
    --region "${AWS_REGION}" \
    --profile "${AWS_PROFILE}"
    warning "port 8080 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
    --protocol "tcp" \
    --port "8080" \
    --cidr "0.0.0.0/0" \
    --region "${AWS_REGION}" \
    --profile "${AWS_PROFILE}"
    warning "port 8081 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
    --protocol "tcp" \
    --port "8081" \
    --cidr "0.0.0.0/0" \
    --region "${AWS_REGION}" \
    --profile "${AWS_PROFILE}"
  fi
}
# Ensure inbound HTTPS (443/tcp) from anywhere exists in the security group
# (reads the globals populated by get_aws_security_group; $1 is unused).
open_https_port_in_security_group(){
  debug
  if [ -z "$INBOUND_PORT_443" ]; then
    warning "port 443 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
      --protocol "tcp" \
      --port "443" \
      --cidr "0.0.0.0/0" \
      --region "${AWS_REGION}" \
      --profile "${AWS_PROFILE}"
  else
    info "port 443 found in docker-machine security group"
  fi
}
# Ensure inbound SSH (22/tcp) from anywhere exists in the security group
# (reads the globals populated by get_aws_security_group; $1 is unused).
open_ssh_port_in_security_group(){
  debug
  if [ -z "$INBOUND_PORT_22" ]; then
    warning "port 22 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
      --protocol "tcp" \
      --port "22" \
      --cidr "0.0.0.0/0" \
      --region "${AWS_REGION}" \
      --profile "${AWS_PROFILE}"
  else
    info "port 22 found in docker-machine security group"
  fi
}
# NOTE(review): despite its name ("alt https"), this function is an exact
# copy of open_ssh_port_in_security_group — it probes and opens port 22
# again and never references an alternate HTTPS port such as 8443. It is
# also never called from the main flow. Confirm intent before changing.
open_alt_https_port_in_security_group(){
  debug
  if [ ! -z "$INBOUND_PORT_22" ]; then
    info "port 22 found in docker-machine security group"
  else
    warning "port 22 not found in docker-machine security group. Adding..."
    aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \
    --protocol "tcp" \
    --port "22" \
    --cidr "0.0.0.0/0" \
    --region "${AWS_REGION}" \
    --profile "${AWS_PROFILE}"
  fi
}
open_ports_in_security_group(){ | |
debug | |
local SECURITY_GROUP_ID=$1 | |
IFS=$'\n' | |
for CURRENT_PORT in "${PORTS_TO_OPEN_ARRAY[@]}" | |
do | |
PROTOCOL="${CURRENT_PORT%%:*}" | |
PORT="${CURRENT_PORT##*:}" | |
info "opening port protocol: $PROTOCOL port: $PORT" | |
set -x | |
aws ec2 authorize-security-group-ingress --group-id "${SECURITY_GROUP_ID}" \ | |
--protocol "${PROTOCOL}" \ | |
--port "${PORT}" \ | |
--region "${AWS_REGION}" \ | |
--source-group "${SECURITY_GROUP_ID}" | |
set +x | |
done | |
} | |
# Create every worker instance listed in WORKERS_ARRAY and report its IP.
create_swarm_workers(){
  debug
  local IFS=$'\n'  # 'local' keeps the caller's IFS intact
  local CURRENT_WORKER
  for CURRENT_WORKER in "${WORKERS_ARRAY[@]}"
  do
    create_ec2_instance "${CURRENT_WORKER}" "Name,swarm-${CURRENT_WORKER}"
    wait_for 15
    # BUG FIX: previously queried the MANAGER's IP here instead of the
    # freshly created worker's.
    get_inet_interface_ip "${CURRENT_WORKER}"
    info "Worker created IP address: ${NODE_IP_ADDRESS}"
  done
}
# Create the swarm manager EC2 instance.
create_swarm_manager(){
  debug
  local manager_tag="Name,swarm-${MANAGER_NAME}"
  create_ec2_instance "${MANAGER_NAME}" "${manager_tag}"
}
# Report the manager's IP, then bind the pre-allocated elastic IP to it.
# Relies on allocate_elastic_ip having run first (ELASTIC_IP_ALLOCATION_ID).
associate_elastic_ip_to_manager(){
  debug
  get_inet_interface_ip "${MANAGER_NAME}"
  info "Manager created IP address ${NODE_IP_ADDRESS}"
  info "Associate previously created elastic ip to manager node"
  associate_elastic_ip "${MANAGER_NAME}"
}
# Export the docker-machine config of the manager and every worker via the
# sibling docker-machine-export.sh helper script.
export_docker_machine_configs(){
  debug
  "${BASEDIR}"/../docker/docker-machine-export.sh "${MANAGER_NAME}"
  local IFS=$'\n'  # 'local' keeps the caller's IFS intact (was leaked before)
  local WORKER
  for WORKER in "${WORKERS_ARRAY[@]}"
  do
    # Quote ${BASEDIR} (the manager call above already did; the loop did not)
    "${BASEDIR}"/../docker/docker-machine-export.sh "${WORKER}"
  done
}
# | |
# sed replace ips in ansible inventory file | |
# | |
# Build the AWS ansible inventory from its template by substituting the
# manager IP, worker IPs, EC2 user and key path discovered via docker-machine.
# Sets: INSTANCE_LIST, MANAGER_IP, WORKERS_IPS_STR
create_aws_ansible_inventory_file(){
  debug
  INSTANCE_LIST=$(docker-machine ls -f "{{.Name}}")
  set -x
  # Machines with "manager" in the name provide MANAGER_IP; all others are
  # accumulated into WORKERS_IPS_STR.
  # NOTE(review): the "\n" joining worker IPs is a literal backslash-n —
  # confirm the template/sed handling renders it as a real newline.
  for MACHINE_NAME in $INSTANCE_LIST
  do
    if [[ "$MACHINE_NAME" =~ ^.*manager.*$ ]]; then
      MANAGER_IP=$(docker-machine ip ${MACHINE_NAME})
    else
      if [ -z "$WORKERS_IPS_STR" ]; then
        WORKERS_IPS_STR=$(docker-machine ip ${MACHINE_NAME})
      else
        WORKER_IP=$(docker-machine ip ${MACHINE_NAME})
        WORKERS_IPS_STR="${WORKERS_IPS_STR}\n${WORKER_IP}"
      fi
    fi
  done
  info "WORKERS IPS :: "
  echo $WORKERS_IPS_STR
  # macOS ships BSD sed; the template substitution relies on GNU sed (gsed)
  if is_osx; then
    warning "GNU sed must be available as gsed"
    gsed -e "s_##MANAGERIP##_${MANAGER_IP}_" \
    -e "s_##EC2USER##_${EC2_AMI_DEFAULT_USER}_" \
    -e "s_##KEYPATH##_${KEY_PAIR_PATH}_" \
    -e 's_##WORKERSIPS##_'"${WORKERS_IPS_STR}"'_' \
    ${BASEDIR}/../ansible/inventory/AWS.tmpl > ${BASEDIR}/../ansible/inventory/AWS
  else
    sed -e "s_##MANAGERIP##_${MANAGER_IP}_" \
    -e "s_##EC2USER##_${EC2_AMI_DEFAULT_USER}_" \
    -e "s_##KEYPATH##_${KEY_PAIR_PATH}_" \
    -e 's_##WORKERSIPS##_'"${WORKERS_IPS_STR}"'_' \
    ${BASEDIR}/../ansible/inventory/AWS.tmpl > ${BASEDIR}/../ansible/inventory/AWS
  fi
  set +x
}
# Run the three provisioning playbooks against the AWS inventory, in order:
# docker install, swarm bootstrap, then the docker cleanup cron.
run_swarm_init_ansible_playbook(){
  debug
  local playbook
  for playbook in docker-provision-ubuntu swarm-provision docker-cleanup-cron; do
    run_playbook "$playbook" "AWS"
  done
}
# ERR-trap handler: tear down everything created so far, in reverse order,
# guided by the phase_N flags set by the main flow; always exits via fatal.
#   $1 - line number where the error fired (from $LINENO in the trap)
err_cleanup(){
  debug
  error "Error detected on line: $1"
  error "Executing cleanup"
  # NOTE(review): set -e here makes the cleanup itself stop at its first
  # failing command — confirm partial cleanup is the intended behavior.
  set -e
  if [ ! -z "$phase_5" ]; then
    warning "Removing workers instances"
    docker-machine ls -f "{{.Name}}" | grep 'worker' | xargs docker-machine rm -f
    wait_for 35
  fi
  if [ ! -z "$phase_4" ]; then
    warning "Desassociate elastic ip adress :: ${ELASTIC_IP_ASSOCIATION_ID}"
    aws ec2 disassociate-address --association-id "${ELASTIC_IP_ASSOCIATION_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
  fi
  if [ ! -z "$phase_3" ]; then
    warning "Removing managers instances"
    docker-machine ls -f "{{.Name}}" | grep 'manager' | xargs docker-machine rm -f
    wait_for 35
  fi
  if [ ! -z "$phase_2" ]; then
    warning "Releasing elastic ip address :: ${ELASTIC_IP_ALLOCATION_ID}"
    aws ec2 release-address --allocation-id "${ELASTIC_IP_ALLOCATION_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
  fi
  if [ ! -z "$phase_1" ]; then
    warning "Deleting VPC :: ${VPC_ID}"
    set -x
    # VPC dependents must go first: security group, subnet, route table,
    # internet gateway (detach, then delete), and finally the VPC itself.
    aws ec2 delete-security-group --group-id "${SECURITY_GROUP_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    aws ec2 delete-subnet --subnet-id "${SUBNET_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    aws ec2 delete-route-table --route-table-id "${ROUTES_TABLE_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    aws ec2 detach-internet-gateway --internet-gateway-id "${INTERNET_GATEWAY_ID}" --vpc-id "${VPC_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    aws ec2 delete-internet-gateway --internet-gateway-id "${INTERNET_GATEWAY_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    aws ec2 delete-vpc --vpc-id "${VPC_ID}" --profile "${AWS_PROFILE}" --region "${AWS_REGION}"
    set +x
  fi
  separator
  error "FINISHED CLEANUP"
  ELAPSED="Elapsed: $(($SECONDS / 3600))hrs $((($SECONDS / 60) % 60))min $(($SECONDS % 60))sec"
  fatal "$ELAPSED"
}
# True when running on macOS (uname reporting "Darwin"/"darwin").
is_osx(){
  debug
  case "$(uname)" in
    *arwin) return 0 ;;
    *) return 1 ;;
  esac
}
# Verify (and where possible auto-install) every CLI tool this script needs.
check_dependencies(){
  debug
  local dependency_check
  for dependency_check in check_python check_pip check_aws_cli check_jq; do
    "$dependency_check"
  done
}
# | |
# For docker swarm provisioning playbook | |
# | |
# Run one ansible playbook against one inventory file.
#   $1 - playbook basename (without .yml) under ansible/playbooks/
#   $2 - inventory basename under ansible/inventory/
# Resolves AWS credentials from 'aws configure' when not already set.
run_playbook(){
  export ANSIBLE_NOCOWS=1
  export ANSIBLE_RETRY_FILES_ENABLED=${ANSIBLE_RETRY_FILES_ENABLED:-false}
  export ANSIBLE_STDOUT_CALLBACK=${ANSIBLE_STDOUT_CALLBACK:-debug}
  local PLAYBOOK="$BASEDIR/../ansible/playbooks/$1.yml"
  local INVENTORY="$BASEDIR/../ansible/inventory/$2"
  echo "Running $PLAYBOOK"
  echo "Using $INVENTORY"
  AWS_KEY_ID=${AWS_KEY_ID:="$(aws configure get aws_access_key_id --profile ${AWS_PROFILE})"}
  AWS_SECRET_KEY=${AWS_SECRET_KEY:="$(aws configure get aws_secret_access_key --profile ${AWS_PROFILE})"}
  # NOTE(review): the secret key is interpolated into a string that is later
  # eval'd — with the caller's 'set -x' tracing it will appear in logs and
  # 'ps' output; consider passing secrets via environment instead of argv.
  EXTRA_PARAMS="--extra-vars 'aws_cli_user=${AWS_CLI_USER} aws_region=${AWS_REGION} aws_profile=${AWS_PROFILE} aws_key_id=${AWS_KEY_ID} aws_secret_key=${AWS_SECRET_KEY} aws_registry_id=${AWS_REGISTRY_ID}'"
  set -x
  # eval is needed because EXTRA_PARAMS embeds a quoted --extra-vars payload
  eval ansible-playbook -i "$INVENTORY" "$PLAYBOOK" -e 'host_key_checking=False' "${EXTRA_PARAMS}" -v
  set +x
}
# Ensure the cluster-vars output directory exists next to this script.
export_cluster_variables(){
  debug
  local vars_dir="${BASEDIR}/cluster-vars/"
  mkdir -p "${vars_dir}"
}
if [[ "${BASH_SOURCE[0]}" = "$0" ]]; then | |
SECONDS=0 | |
if is_no_colors; then | |
unset_colors | |
else | |
set_colors | |
fi | |
trap 'err_cleanup $LINENO' ERR | |
set -eE | |
separator | |
bump_step "CREATING EC2 SWARM CLUSTER" | |
separator | |
bump_step "CHECK SCRIPT DEPENDENCIES" | |
check_dependencies | |
separator | |
bump_step "CREATE VPC" | |
create_vpc | |
wait_for 10 | |
separator | |
bump_step "CREATE SUBNET" | |
create_subnet_in_vpc | |
wait_for 20 | |
separator | |
bump_step "CREATE INTERNET GATEWAY" | |
create_internet_gateway_vpc | |
wait_for 10 | |
log "${MAG}[PHASE] 1 COMPLETE${NC}" | |
phase_1='ok' | |
separator | |
bump_step "ALLOCATE ELASTIC IP FOR MANAGER" | |
allocate_elastic_ip | |
log "${MAG}[PHASE] 2 COMPLETE${NC}" | |
phase_2='ok' | |
separator | |
bump_step "SWARM MANAGER CREATION" | |
create_swarm_manager | |
wait_for 20 | |
log "${MAG}[PHASE] 3 COMPLETE${NC}" | |
phase_3='ok' | |
separator "ASSOCIATE ELLASTIC IP TO MANAGER" | |
associate_elastic_ip_to_manager | |
wait_for 5 | |
log "${MAG}[PHASE] 4 COMPLETE${NC}" | |
phase_4='ok' | |
separator | |
bump_step "SECURITY GROUP CONFIGURATION" | |
get_aws_security_group "${SECGROUP_NAME}" | |
open_ports_in_security_group "${SECURITY_GROUP_ID}" | |
open_http_port_in_security_group "${SECURITY_GROUP_ID}" | |
open_https_port_in_security_group "${SECURITY_GROUP_ID}" | |
open_ssh_port_in_security_group "${SECURITY_GROUP_ID}" | |
separator | |
bump_step "SWARM WORKER CREATION" | |
create_swarm_workers | |
log "${MAG}[PHASE] 5 COMPLETE${NC}" | |
phase_5='ok' | |
separator | |
bump_step "CREATE AWS ANSIBLE INVENTORY FILE" | |
create_aws_ansible_inventory_file | |
separator | |
bump_step "EXPORT DOCKER MACHINE CONFIGS" | |
export_docker_machine_configs | |
separator | |
bump_step "SUBDOMAIN CREATION IN ROUTE 53" | |
get_hosted_zone_id "${ROUTE_53_DOMAIN}" | |
create_registry_set_in_hosted_zone "${ROUTE_53_SUBDOMAIN}" "${ROUTE_53_DOMAIN}" | |
separator | |
bump_step "INIT DOCKER SWARM" | |
set -x | |
run_swarm_init_ansible_playbook | |
set +x | |
separator | |
ELAPSED="Elapsed: $(($SECONDS / 3600))hrs $((($SECONDS / 60) % 60))min $(($SECONDS % 60))sec" | |
bump_step "FINISHED CREATING SWARM CLUSTER :: $ELAPSED" | |
separator | |
fi |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment