Last active
January 18, 2022 00:51
-
-
Save vfarcic/868bf70ac2946458f5485edea1f6fc4c to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Source: https://gist.github.com/vfarcic/868bf70ac2946458f5485edea1f6fc4c

######################
# Create The Cluster #
######################

# Make sure that you're using eksctl v0.1.5+.
# Follow the instructions from https://github.com/weaveworks/eksctl to install eksctl.

export AWS_ACCESS_KEY_ID=[...] # Replace [...] with AWS access key ID
export AWS_SECRET_ACCESS_KEY=[...] # Replace [...] with AWS secret access key
export AWS_DEFAULT_REGION=us-west-2

# Cluster name; reused below for autoscaling tags, IAM role lookup, and teardown.
export NAME=devops25

# Local directory that will hold the generated kubeconfig.
mkdir -p cluster

# Create a 3-node (min 3 / max 9) EKS cluster and write its kubeconfig
# to cluster/kubecfg-eks instead of merging into ~/.kube/config.
eksctl create cluster \
    -n "$NAME" \
    -r "$AWS_DEFAULT_REGION" \
    --kubeconfig cluster/kubecfg-eks \
    --node-type t2.small \
    --nodes 3 \
    --nodes-max 9 \
    --nodes-min 3

# Point kubectl/helm at the new cluster for the rest of this session.
export KUBECONFIG="$PWD/cluster/kubecfg-eks"
###################
# Install Ingress #
###################

# Deploy nginx-ingress pinned to a specific upstream commit: the core
# (namespace, controller Deployment, RBAC), then the AWS L4 LoadBalancer
# Service, then the ConfigMap patch that enables proxy-protocol support.
kubectl apply \
    -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/1cd17cd12c98563407ad03812aebac46ca4442f2/deploy/mandatory.yaml

kubectl apply \
    -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/1cd17cd12c98563407ad03812aebac46ca4442f2/deploy/provider/aws/service-l4.yaml

kubectl apply \
    -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/1cd17cd12c98563407ad03812aebac46ca4442f2/deploy/provider/aws/patch-configmap-l4.yaml
##################
# Install Tiller #
##################

# NOTE(review): Tiller is Helm 2 only; Helm 3 removed it. These steps are
# kept as-is for the Helm 2 workflow this script targets.

# Create the ServiceAccount + ClusterRoleBinding Tiller needs.
kubectl create \
    -f https://raw.githubusercontent.com/vfarcic/k8s-specs/master/helm/tiller-rbac.yml \
    --record --save-config

# Install Tiller into kube-system bound to that ServiceAccount.
helm init --service-account tiller

# Block until the Tiller Deployment is rolled out.
kubectl -n kube-system \
    rollout status deploy tiller-deploy
##################
# Metrics Server #
##################

# Install metrics-server (required by HPA for CPU/memory metrics),
# pinned to chart version 2.0.2, in its own "metrics" namespace.
helm install stable/metrics-server \
    --name metrics-server \
    --version 2.0.2 \
    --namespace metrics

# Wait for the Deployment to become available before continuing.
kubectl -n metrics \
    rollout status \
    deployment metrics-server
##################
# Get Cluster IP #
##################

# Hostname of the ELB created for the ingress-nginx Service.
LB_HOST=$(kubectl -n ingress-nginx \
    get svc ingress-nginx \
    -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")

# Resolve the ELB hostname to one of its IPs; used below to build nip.io
# hostnames. dig may return several A records — keep the last one.
export LB_IP="$(dig +short "$LB_HOST" \
    | tail -n 1)"

printf '%s\n' "$LB_IP"

# Repeat the `export` command if the output is empty
# (the ELB's DNS record can take a minute or two to propagate).
############################
# Install Prometheus Chart #
############################

# nip.io resolves mon.<IP>.nip.io / alertmanager.<IP>.nip.io back to <IP>,
# giving us Ingress hostnames without owning a DNS zone.
PROM_ADDR=mon.$LB_IP.nip.io
AM_ADDR=alertmanager.$LB_IP.nip.io

# Install (or upgrade, -i) the Prometheus chart pinned to 7.1.3.
# The {...} braces are Helm's list syntax for --set; quoted so the shell
# passes them through untouched.
helm upgrade -i prometheus \
    stable/prometheus \
    --namespace metrics \
    --version 7.1.3 \
    --set server.ingress.hosts="{$PROM_ADDR}" \
    --set alertmanager.ingress.hosts="{$AM_ADDR}" \
    -f mon/prom-values.yml

# Wait for the Prometheus server Deployment to roll out.
kubectl -n metrics \
    rollout status \
    deploy prometheus-server
##############
# Install CA # (Cluster Autoscaler)
##############

# Find the Auto Scaling Group eksctl created for this cluster's nodegroup.
ASG_NAME=$(aws autoscaling \
    describe-auto-scaling-groups \
    | jq -r ".AutoScalingGroups[] \
    | select(.AutoScalingGroupName \
    | startswith(\"eksctl-$NAME-nodegroup\")) \
    .AutoScalingGroupName")

printf '%s\n' "$ASG_NAME"

# Tag the ASG so the Cluster Autoscaler's auto-discovery can find it.
aws autoscaling \
    create-or-update-tags \
    --tags \
    "ResourceId=$ASG_NAME,ResourceType=auto-scaling-group,Key=k8s.io/cluster-autoscaler/enabled,Value=true,PropagateAtLaunch=true" \
    "ResourceId=$ASG_NAME,ResourceType=auto-scaling-group,Key=kubernetes.io/cluster/$NAME,Value=true,PropagateAtLaunch=true"

# Find the IAM role attached to the worker nodes.
IAM_ROLE=$(aws iam list-roles \
    | jq -r ".Roles[] \
    | select(.RoleName \
    | startswith(\"eksctl-$NAME-nodegroup-0-NodeInstanceRole\")) \
    .RoleName")

printf '%s\n' "$IAM_ROLE"

# Grant the node role permission to scale the ASG (policy JSON supplied
# locally in scaling/eks-autoscaling-policy.json).
aws iam put-role-policy \
    --role-name "$IAM_ROLE" \
    --policy-name "$NAME-AutoScaling" \
    --policy-document file://scaling/eks-autoscaling-policy.json

# Install the Cluster Autoscaler chart with ASG auto-discovery enabled.
helm install stable/cluster-autoscaler \
    --name aws-cluster-autoscaler \
    --namespace kube-system \
    --set autoDiscovery.clusterName="$NAME" \
    --set awsRegion="$AWS_DEFAULT_REGION" \
    --set sslCertPath=/etc/kubernetes/pki/ca.crt \
    --set rbac.create=true

# Wait for the autoscaler Deployment to roll out.
kubectl -n kube-system \
    rollout status \
    deployment aws-cluster-autoscaler
#######################
# Destroy the cluster #
#######################

# NOTE(review): this section assumes $NAME and $LB_HOST are still set from
# the setup steps above (e.g. same shell session). Re-export them if not.
export AWS_DEFAULT_REGION=us-west-2

# Detach the custom autoscaling policy before deleting the cluster,
# otherwise eksctl/CloudFormation cannot delete the node role.
IAM_ROLE=$(aws iam list-roles \
    | jq -r ".Roles[] \
    | select(.RoleName \
    | startswith(\"eksctl-$NAME-nodegroup-0-NodeInstanceRole\")) \
    .RoleName")

printf '%s\n' "$IAM_ROLE"

aws iam delete-role-policy \
    --role-name "$IAM_ROLE" \
    --policy-name "$NAME-AutoScaling"

eksctl delete cluster -n "$NAME"

# The ELB name is the leading segment of its hostname
# (<name>-<hash>.<region>.elb.amazonaws.com); Kubernetes names the ELB's
# security group "k8s-elb-<name>". The original script referenced an
# undefined $LB_NAME here — derive it from $LB_HOST instead.
# TODO(review): confirm against the actual SG name in your account.
LB_NAME=${LB_HOST%%-*}

SG_NAME=$(aws ec2 describe-security-groups \
    --filters "Name=group-name,Values=k8s-elb-$LB_NAME" \
    | jq -r ".SecurityGroups[0].GroupId")

printf '%s\n' "$SG_NAME"

# Remove the orphaned ELB security group left behind after cluster deletion.
aws ec2 delete-security-group \
    --group-id "$SG_NAME"
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment