Skip to content

Instantly share code, notes, and snippets.

@caruccio
Last active October 27, 2021 23:21
Show Gist options
  • Save caruccio/9cb40ea915bd01d46feb5b6cbf5b83ac to your computer and use it in GitHub Desktop.
Undistro Demo EKS
#!/bin/bash
# Print usage and exit when invoked with -h/--help; any other first
# argument falls through to the phase dispatcher at the bottom.
case "$1" in
-h|--help)
    cat <<USAGE
Usage: $0 [phase]

Default phases, in this order: 
 download Download all required tools into current directory
 credentials Ask for missing credentials
 create-manager-cluster Create Manager Cluster, used to create "real" Clusters
 install-undistro Install all Undistro controllers into Manager Cluster
 create-cluster Create "real" Cluster
 wait-cluster Wait for "real" Cluster to become ready

Extra phases:
 destroy Delete all clusters and resources
USAGE
    exit
    ;;
esac
set -e

# Tunable settings — override any of these via the environment.
# NOTE: ':=' also replaces a set-but-empty value; the original used plain '='
# for MANAGER_CLUSTER_NODEPOOL, inconsistent with every other default here.
: "${MANAGER_CLUSTER_NAME:=undistro-demo-manager}"
: "${MANAGER_CLUSTER_NODEPOOL:=default}"
: "${KUBECONFIG:=$PWD/kubeconfig-$MANAGER_CLUSTER_NAME}"
: "${CLUSTER_NAMESPACE:=aws-demo}"
: "${CLUSTER_NAME:=eks-demo}"
: "${UNDISTRO_VERSION:=0.36.16}"
: "${CLUSTER_KUBECONFIG:=kubeconfig-eks-demo.yaml}"
export KUBECONFIG
# Prefer the tools downloaded into the current directory over system ones.
export PATH="$PWD/:$PATH"
function log()
{
    # Print ">>> <msg>" in yellow. An optional leading echo flag (e.g. -n)
    # is forwarded to echo.
    # fixed: 'p' is now local and reset on every call — previously it was a
    # global that kept the flag from an earlier call (a stale '-n' would
    # silently suppress newlines in every later flag-less call).
    local p=""
    [ "${1:0:1}" == - ] && { p=$1; shift; }
    echo -e ${p:+"$p"} ">>> $(tput setaf 3)$*$(tput sgr0)"
}
function logb()
{
    # Print ">>> <msg>" in bold yellow. An optional leading echo flag (e.g. -n)
    # is forwarded to echo.
    # fixed: 'p' is now local and reset on every call — previously a stale flag
    # from an earlier call leaked into later flag-less calls.
    local p=""
    [ "${1:0:1}" == - ] && { p=$1; shift; }
    echo -e ${p:+"$p"} ">>> $(tput bold)$(tput setaf 3)$*$(tput sgr0)"
}
function pause()
{
    # Optionally print a bold message, then block until the user hits [ENTER].
    if [ $# -gt 0 ]; then
        logb "$@"
    fi
    read -p "$(tput bold)>>> $(tput setaf 1)Press [ENTER] to continue ...$(tput sgr0)"
}
function phase_download()
{
    # Fetch undistro, eksctl, aws-iam-authenticator and kubectl into the
    # current directory, skipping any tool already present and executable.
    # fixed: every curl now uses -f, so an HTTP error page can never be saved
    # (and chmod'ed +x) as if it were the binary.
    ################################################################################
    logb "Downloading binaries"
    ################################################################################
    log "> undistro"
    if ! [ -x ./undistro ]; then
        curl -fsL https://github.com/getupio-undistro/undistro/releases/download/v$UNDISTRO_VERSION/undistro_${UNDISTRO_VERSION}_linux_amd64.tar.gz \
            | tar xz undistro
        chmod +x undistro
    fi
    log "> eksctl"
    if ! [ -x ./eksctl ]; then
        curl -fL "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" \
            | tar xz eksctl
        chmod +x eksctl
    fi
    log "> aws-iam-authenticator"
    if ! [ -x ./aws-iam-authenticator ]; then
        curl -f -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/aws-iam-authenticator
        chmod +x aws-iam-authenticator
    fi
    log "> kubectl"
    if ! [ -x ./kubectl ]; then
        # -k (skip TLS verification) dropped: storage.googleapis.com has a valid cert.
        curl -fsL https://storage.googleapis.com/kubernetes-release/release/v1.20.6/bin/linux/amd64/kubectl > kubectl
        chmod +x kubectl
    fi
}
function phase_credentials()
{
    # Read AWS credentials from the environment, prompting interactively for
    # any that are missing, then export them and show the resulting identity.
    # fixed: the original used 'test && log || read', which would also prompt
    # whenever 'log' itself returned non-zero; explicit if/else removes that
    # ambiguity. Expansions are quoted.
    ################################################################################
    logb "Reading AWS credentials"
    ################################################################################
    if [ -n "$AWS_ACCESS_KEY_ID" ]; then
        log "Found AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
    else
        read -p "AWS_ACCESS_KEY_ID: " AWS_ACCESS_KEY_ID
    fi
    if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
        log 'Found AWS_SECRET_ACCESS_KEY=*******'
    else
        # -s: do not echo the secret; the explicit 'echo' restores the newline.
        read -sp "AWS_SECRET_ACCESS_KEY: " AWS_SECRET_ACCESS_KEY && echo
    fi
    if [ -n "$AWS_REGION" ]; then
        log "Found AWS_REGION=$AWS_REGION"
    else
        read -p "AWS_REGION: " AWS_REGION
    fi
    export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION
    log "Current AWS Account:"
    aws sts get-caller-identity
}
function phase_create_manager_cluster()
{
    # Create the EKS Manager Cluster (control plane + a single-node nodegroup)
    # and write its kubeconfig to $KUBECONFIG.
    # NOTE: the manager cluster is pinned to us-east-1 because its availability
    # zones are hard-coded below. The original mixed the hard-coded region with
    # $AWS_REGION across eksctl calls, which broke whenever AWS_REGION was any
    # other region; MANAGER_CLUSTER_REGION keeps every call consistent.
    local MANAGER_CLUSTER_REGION=us-east-1
    ################################################################################
    pause "Ready to create Manager Cluster: $MANAGER_CLUSTER_NAME"
    ################################################################################
    log "Generating credentials.yaml"
    cat >credentials.yaml <<EOF
global:
  undistroRepository: registry.undistro.io/library
  undistroVersion: v${UNDISTRO_VERSION}
undistro:
  ingress:
    hosts:
      - ${CLUSTER_NAME}-${CLUSTER_NAMESPACE}.undistro-demo.getupcloud.com
undistro-aws:
  enabled: true
  credentials:
    region: $AWS_REGION
    accessKeyID: $AWS_ACCESS_KEY_ID
    secretAccessKey: "$AWS_SECRET_ACCESS_KEY"
    # sessionToken: put your key here if you use 2FA
EOF
    # credentials.yaml holds the AWS secret key in clear text: owner-only access.
    chmod 600 credentials.yaml
    logb "Looking for Manager Cluster: $MANAGER_CLUSTER_NAME ..."
    if ! ./eksctl get cluster $MANAGER_CLUSTER_NAME --region $MANAGER_CLUSTER_REGION &>/dev/null; then
        logb "\nCreating Manager Cluster: $MANAGER_CLUSTER_NAME"
        ./eksctl create cluster \
            --name $MANAGER_CLUSTER_NAME \
            --version 1.20 \
            --region $MANAGER_CLUSTER_REGION \
            --zones ${MANAGER_CLUSTER_REGION}a,${MANAGER_CLUSTER_REGION}b \
            --with-oidc \
            --without-nodegroup
    else
        log "Found existing cluster $MANAGER_CLUSTER_NAME"
    fi
    ################################################################################
    pause "Ready to create Manager Cluster Nodepool: $MANAGER_CLUSTER_NODEPOOL"
    logb "Looking for Manager Cluster Nodepool: $MANAGER_CLUSTER_NODEPOOL"
    ################################################################################
    if ! ./eksctl get nodegroup --cluster $MANAGER_CLUSTER_NAME --region $MANAGER_CLUSTER_REGION --name $MANAGER_CLUSTER_NODEPOOL &>/dev/null; then
        ./eksctl create nodegroup \
            --cluster $MANAGER_CLUSTER_NAME \
            --region $MANAGER_CLUSTER_REGION \
            --name $MANAGER_CLUSTER_NODEPOOL \
            --node-type m5.xlarge \
            --nodes 1 \
            --nodes-min 1 \
            --nodes-max 1
    else
        # fixed: this message used to print the cluster name, not the nodepool.
        log "Found existing nodepool $MANAGER_CLUSTER_NODEPOOL"
    fi
    logb "Downloading kubeconfig to KUBECONFIG=$KUBECONFIG"
    ./eksctl utils write-kubeconfig \
        --cluster $MANAGER_CLUSTER_NAME \
        --region $MANAGER_CLUSTER_REGION \
        --kubeconfig $KUBECONFIG
}
function phase_install_undistro()
{
    # Install the Undistro controllers into the Manager Cluster, unless the
    # undistro-system namespace shows they are already there.
    if kubectl get namespace undistro-system &>/dev/null; then
        logb "Undistro is already installed into Manager Cluster $MANAGER_CLUSTER_NAME"
        return
    fi
    ################################################################################
    pause "Manager Cluster is ready. Let's install Undistro into it"
    log "Looking for namespace undistro-system"
    ################################################################################
    logb "Installing undistro into Manager Cluster: $MANAGER_CLUSTER_NAME"
    ./undistro install --config credentials.yaml
}
function phase_create_cluster()
{
    # Generate the workload cluster manifests and apply them, unless the
    # cluster already exists in $CLUSTER_NAMESPACE.
    ################################################################################
    pause "Ready to create Undistro Cluster: $CLUSTER_NAME"
    ################################################################################
    # fixed: both existence lookups must target $CLUSTER_NAMESPACE — the
    # original queried the default namespace, so the check always missed and
    # the manifests were regenerated/re-applied on every run.
    if ! kubectl get cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME &>/dev/null; then
        ################################################################################
        log "Generating cluster manifests: $CLUSTER_NAME.yaml"
        ################################################################################
        undistro create cluster $CLUSTER_NAME \
            --namespace $CLUSTER_NAMESPACE \
            --k8s-version v1.20.0 \
            --infra aws \
            --flavor eks \
            --generate-file
        ################################################################################
        log "Creating cluster $CLUSTER_NAME from manifests: $CLUSTER_NAME.yaml"
        ################################################################################
        kubectl apply -f $CLUSTER_NAME.yaml
    else
        log "Cluster $CLUSTER_NAME already exists"
        kubectl get cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
    fi
}
function phase_wait_cluster()
{
    # Block until the workload cluster reports Ready, then fetch its kubeconfig.
    ################################################################################
    log "Undistro Cluster $CLUSTER_NAMESPACE/$CLUSTER_NAME is being created..."
    ################################################################################
    # Progress reporter runs in the background while we poll for readiness.
    undistro show-progress -n $CLUSTER_NAMESPACE $CLUSTER_NAME &
    local progress_pid=$!
    while ! kubectl wait --timeout=-1s --for=condition=Ready -n $CLUSTER_NAMESPACE cluster/$CLUSTER_NAME &>/dev/null; do
        sleep 2
    done
    # fixed: '|| true' added — the reporter may already have exited, and under
    # 'set -e' a failing kill would silently abort the whole script right here.
    kill $progress_pid 2>/dev/null || true
    ################################################################################
    log "Undistro Cluster $CLUSTER_NAMESPACE/$CLUSTER_NAME is created"
    ################################################################################
    log "Showing Undistro Cluster"
    undistro get cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
    log "Retrieving kubeconfig for Undistro Cluster $CLUSTER_NAME"
    undistro get kubeconfig -n $CLUSTER_NAMESPACE $CLUSTER_NAME --admin > $CLUSTER_KUBECONFIG
    echo
    logb "You can now use your new cluster!"
    echo
    echo "export KUBECONFIG=$PWD/$CLUSTER_KUBECONFIG"
    echo "kubectl get nodes"
}
function phase_destroy()
{
    # Interactively delete the workload cluster, then the Manager Cluster.
    # Each deletion is gated behind an explicit [ENTER] confirmation.
    ################################################################################
    logb "Deleting all resources"
    ################################################################################
    kubectl get cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
    ################################################################################
    pause "Delete cluster above? THIS OPERATION CAN'T BE UNDONE!"
    ################################################################################
    kubectl delete cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
    ./eksctl get cluster --name $MANAGER_CLUSTER_NAME --region $AWS_REGION
    ################################################################################
    pause "Delete Manager Cluster ? THIS OPERATION CAN'T BE UNDONE!"
    ################################################################################
    # --wait blocks until the CloudFormation stacks are fully removed.
    ./eksctl delete cluster --wait --name $MANAGER_CLUSTER_NAME --region $AWS_REGION
}
if [ $# -eq 0 ]; then
    # No arguments: run the full demo pipeline in order.
    phase_download
    phase_credentials
    phase_create_manager_cluster
    phase_install_undistro
    phase_create_cluster
    phase_wait_cluster
else
    # Run only the requested phases; dashes in phase names map to underscores
    # in the phase_* function names.
    logb "Running phases: $@"
    echo "---"
    # fixed: "$@" quoted so phase arguments are never word-split or globbed.
    for phase in "$@"; do
        phase_${phase//-/_}
    done
fi
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment