Last active
January 18, 2022 00:43
-
-
Save vfarcic/6eecd11ee93ce408ad8a7443e2a3c1a9 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
################
# Requirements #
################
# git:       https://git-scm.com/
# terraform: https://www.terraform.io/downloads.html
# kubectl:   https://kubernetes.io/docs/tasks/tools/install-kubectl/
# helm:      https://helm.sh/docs/using_helm/
# aws:       https://aws.amazon.com/cli/
# eksctl:    https://eksctl.io/
# AWS account (https://aws.amazon.com/) with admin privileges
# GitBash (if Windows)

################
# Contributors #
################

# open https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors

##########
# Create #
##########

# Follow https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html
# to create an Access Key ID and a Secret Access Key, then replace [...] below.
export AWS_ACCESS_KEY_ID=[...]
export AWS_SECRET_ACCESS_KEY=[...]
export AWS_DEFAULT_REGION=us-west-2

# List the Kubernetes versions this eksctl release can create.
eksctl create cluster --help \
    | grep "\-version"

# Replace [...] with one of the versions printed above.
export VERSION=[...]

export CLUSTER_NAME=devops-paradox

# Keep this cluster's kubeconfig in a local file instead of ~/.kube/config.
export KUBECONFIG=$PWD/kubeconfig

# Reset Bash's built-in SECONDS counter so we can time cluster creation.
SECONDS=0
# Create the EKS cluster (control plane + a node group of 3-6 t2.small nodes).
# --asg-access grants the nodes IAM access to their Auto Scaling Group, which
# the cluster autoscaler installed later will need.
eksctl create cluster \
    --name "$CLUSTER_NAME" \
    --region "$AWS_DEFAULT_REGION" \
    --node-type t2.small \
    --nodes-max 6 \
    --nodes-min 3 \
    --asg-access \
    --version "$VERSION"

# Print how long the creation took, e.g. "12m34s".
echo "$(($SECONDS / 60))m$(($SECONDS % 60))s"

# Show the IAM role eksctl created for the node group.
aws iam list-roles \
    | jq -r ".Roles[] \
    | select(.RoleName \
    | startswith(\"eksctl-$CLUSTER_NAME-nodegroup\")) \
    .RoleName"

# Capture that role name so we can attach an autoscaling policy to it.
IAM_ROLE=$(aws iam list-roles \
    | jq -r ".Roles[] \
    | select(.RoleName \
    | startswith(\"eksctl-$CLUSTER_NAME-nodegroup\")) \
    .RoleName")

# Attach an inline policy letting the worker nodes manage their ASG.
# NOTE(review): passing an https:// URL as --policy-document relies on the
# AWS CLI following URL parameters (v1 default behavior) — confirm for v2,
# which may require downloading the file and using file:// instead.
aws iam put-role-policy \
    --role-name "$IAM_ROLE" \
    --policy-name "$CLUSTER_NAME-AutoScaling" \
    --policy-document https://raw.githubusercontent.com/vfarcic/k8s-specs/master/scaling/eks-autoscaling-policy.json
# Download the cluster-autoscaler Helm chart into ./charts.
# NOTE(review): 'helm fetch' and the 'stable' repo are Helm 2 era; on Helm 3
# this is 'helm pull' and the chart moved out of the stable repo — confirm
# the Helm version in use.
mkdir -p charts
helm fetch stable/cluster-autoscaler \
    -d charts \
    --untar

# Render the chart into ./ca, configured to auto-discover this cluster's ASGs.
mkdir -p ca
helm template charts/cluster-autoscaler \
    --name aws-cluster-autoscaler \
    --output-dir ca \
    --namespace kube-system \
    --set autoDiscovery.clusterName="$CLUSTER_NAME" \
    --set awsRegion="$AWS_DEFAULT_REGION" \
    --set sslCertPath=/etc/kubernetes/pki/ca.crt \
    --set rbac.create=true

# Deploy the rendered autoscaler manifests.
kubectl apply \
    --namespace kube-system \
    --filename ca/cluster-autoscaler/*

# Confirm the worker nodes are registered.
kubectl get nodes
#########
# Scale #
#########

# Clone a demo application to generate load on the cluster.
git clone \
    https://github.com/vfarcic/devops-toolkit.git

cd devops-toolkit || exit 1

# Render the demo chart with 35 replicas — far more than 3 t2.small nodes can
# hold, forcing the cluster autoscaler to add nodes.
helm template charts/devops-toolkit \
    --name devops-toolkit \
    --output-dir "$PWD" \
    --set replicaCount=35 \
    --set image.repository=vfarcic/devops-toolkit-series \
    --set image.tag=latest \
    --set domain=false

# Apply every rendered manifest under ./devops-toolkit.
kubectl apply \
    --filename devops-toolkit \
    --recursive

cd ..

# Watch Pending pods become Running as new nodes join.
kubectl get pods
kubectl get nodes
###########
# Upgrade #
###########

# Show the current client and server Kubernetes versions.
kubectl version --output yaml

# List the versions this eksctl release can target.
eksctl create cluster --help \
    | grep "\-version"

# Replace [...] with the version to upgrade to.
export VERSION=[...]

# Re-export credentials/kubeconfig (needed if this is a fresh terminal session).
export AWS_ACCESS_KEY_ID=[...]
export AWS_SECRET_ACCESS_KEY=[...]
export KUBECONFIG=$PWD/kubeconfig

# Run this in a second terminal to observe the rolling upgrade live.
watch --interval 10 \
    "kubectl get pods,nodes"

# Dry run: show what the control-plane upgrade would do.
eksctl update cluster \
    --name="$CLUSTER_NAME"

# Execute the control-plane upgrade.
eksctl update cluster \
    --name="$CLUSTER_NAME" \
    --approve

# Verify the server version changed.
kubectl version --output yaml

# Worker nodes are upgraded by replacing the node group: find the old group,
# create a new one at the target version, then delete the old one (eksctl
# drains it so pods reschedule onto the new nodes).
eksctl get nodegroups \
    --cluster="$CLUSTER_NAME"

# Replace [...] with the existing node group's name from the output above.
NODE_GROUP=[...]

eksctl create nodegroup \
    --cluster "$CLUSTER_NAME" \
    --region "$AWS_DEFAULT_REGION" \
    --node-type t2.small \
    --nodes-max 6 \
    --nodes-min 3 \
    --asg-access \
    --version "$VERSION"

eksctl delete nodegroup \
    --cluster "$CLUSTER_NAME" \
    --name "$NODE_GROUP"
###########
# Destroy #
###########

eksctl delete cluster --name "$CLUSTER_NAME"

# Detach the inline autoscaling policy added during setup — a role with inline
# policies still attached blocks full IAM cleanup.
IAM_ROLE=$(aws iam list-roles \
    | jq -r ".Roles[] \
    | select(.RoleName \
    | startswith(\"eksctl-$CLUSTER_NAME-nodegroup\")) \
    .RoleName")

aws iam delete-role-policy \
    --role-name "$IAM_ROLE" \
    --policy-name "$CLUSTER_NAME-AutoScaling"

# Delete again now that the policy is gone — presumably the first attempt
# leaves leftover IAM resources behind; TODO confirm this second pass is
# still required with current eksctl.
eksctl delete cluster -n "$CLUSTER_NAME"
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment