# eks-logging.sh (forked from vfarcic/eks-logging.sh, January 2019)
######################
# Create The Cluster #
######################
# Make sure that you're using eksctl v0.1.5+.
# Follow the instructions from https://github.com/weaveworks/eksctl to install eksctl.
export AWS_ACCESS_KEY_ID=[...] # Replace [...] with AWS access key ID
export AWS_SECRET_ACCESS_KEY=[...] # Replace [...] with AWS secret access key
export AWS_DEFAULT_REGION=us-west-2
export NAME=devops25
mkdir -p cluster
eksctl create cluster \
-n $NAME \
-r $AWS_DEFAULT_REGION \
--kubeconfig cluster/kubecfg-eks \
--node-type t2.large \
--nodes 3 \
--nodes-max 9 \
--nodes-min 3
export KUBECONFIG=$PWD/cluster/kubecfg-eks
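# Optional sanity check: with the kubeconfig exported, the worker nodes
# should register within a few minutes.
kubectl cluster-info
kubectl get nodes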
###################
# Install Ingress #
###################
kubectl apply \
-f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml
kubectl apply \
-f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml
kubectl apply \
-f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml
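# Optional check: wait for the Ingress controller to roll out and for AWS to
# provision the ELB. The Deployment is named nginx-ingress-controller in the
# manifests referenced above, but the name may differ in newer ingress-nginx releases.
kubectl -n ingress-nginx \
rollout status \
deployment nginx-ingress-controller
kubectl -n ingress-nginx get svc ingress-nginx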
##################
# Install Tiller #
##################
kubectl create \
-f https://raw.githubusercontent.com/vfarcic/k8s-specs/master/helm/tiller-rbac.yml \
--record --save-config
helm init --service-account tiller
kubectl -n kube-system \
rollout status deploy tiller-deploy
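# Optional check: with Helm 2, `helm version` should report both the client
# and the Tiller (server) version once the rollout above completes.
helm version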
##################
# Metrics Server #
##################
helm install stable/metrics-server \
--name metrics-server \
--version 2.0.2 \
--namespace metrics
kubectl -n metrics \
rollout status \
deployment metrics-server
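# Optional check: the Metrics Server needs a minute or so before it starts
# serving data; repeat the command if it returns an error at first.
kubectl top nodes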
##############
# Install CA #
##############
ASG_NAME=$(aws autoscaling \
describe-auto-scaling-groups \
| jq -r ".AutoScalingGroups[] \
| select(.AutoScalingGroupName \
| startswith(\"eksctl-$NAME-nodegroup\")) \
.AutoScalingGroupName")
echo $ASG_NAME
aws autoscaling \
create-or-update-tags \
--tags \
ResourceId=$ASG_NAME,ResourceType=auto-scaling-group,Key=k8s.io/cluster-autoscaler/enabled,Value=true,PropagateAtLaunch=true \
ResourceId=$ASG_NAME,ResourceType=auto-scaling-group,Key=kubernetes.io/cluster/$NAME,Value=true,PropagateAtLaunch=true
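# Optional check: confirm that both autodiscovery tags were applied to the
# Auto Scaling Group.
aws autoscaling \
describe-tags \
--filters Name=auto-scaling-group,Values=$ASG_NAME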
IAM_ROLE=$(aws iam list-roles \
| jq -r ".Roles[] \
| select(.RoleName \
| startswith(\"eksctl-$NAME-nodegroup-0-NodeInstanceRole\")) \
.RoleName")
echo $IAM_ROLE
aws iam put-role-policy \
--role-name $IAM_ROLE \
--policy-name $NAME-AutoScaling \
--policy-document file://scaling/eks-autoscaling-policy.json
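# The command above assumes that scaling/eks-autoscaling-policy.json exists
# locally (e.g., in the repository this script is run from).
# Optional check: confirm the inline policy is attached to the node role.
aws iam get-role-policy \
--role-name $IAM_ROLE \
--policy-name $NAME-AutoScaling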
helm install stable/cluster-autoscaler \
--name aws-cluster-autoscaler \
--namespace kube-system \
--set autoDiscovery.clusterName=$NAME \
--set awsRegion=$AWS_DEFAULT_REGION \
--set sslCertPath=/etc/kubernetes/pki/ca.crt \
--set rbac.create=true
kubectl -n kube-system \
rollout status \
deployment aws-cluster-autoscaler
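# Optional check: the Cluster Autoscaler logs should show that it discovered
# the node group's ASG. The Deployment name matches the one used in the
# rollout command above; adjust it if your chart version generates a different name.
kubectl -n kube-system \
logs deployment/aws-cluster-autoscaler \
--tail 20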
##################
# Get Cluster IP #
##################
LB_HOST=$(kubectl -n ingress-nginx \
get svc ingress-nginx \
-o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
export LB_IP="$(dig +short $LB_HOST \
| tail -n 1)"
echo $LB_IP
# Repeat the `export` command if the output is empty
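# Optional check: once the ELB is active, a request to the IP should get a
# response from the Ingress controller (typically a 404 from the default backend).
curl -i "http://$LB_IP"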
#######################
# Destroy the cluster #
#######################
export AWS_DEFAULT_REGION=us-west-2
LB_NAME=$(aws elb \
describe-load-balancers \
| jq -r \
".LoadBalancerDescriptions[] \
| select(.SourceSecurityGroup.GroupName \
| contains (\"k8s-elb\")) \
.LoadBalancerName")
echo $LB_NAME
aws elb delete-load-balancer \
--load-balancer-name $LB_NAME
aws iam delete-role-policy \
--role-name $IAM_ROLE \
--policy-name $NAME-AutoScaling
eksctl delete cluster -n $NAME
SG_NAME=$(aws ec2 describe-security-groups \
--filters Name=group-name,Values=k8s-elb-$LB_NAME \
| jq -r ".SecurityGroups[0].GroupId")
echo $SG_NAME
aws ec2 delete-security-group \
--group-id $SG_NAME
# Delete any EBS volumes left in the `available` state after the cluster is destroyed
for VOLUME in $(aws ec2 describe-volumes \
--region $AWS_DEFAULT_REGION \
--filters Name=status,Values=available \
--query "Volumes[].VolumeId" \
--output text); do
aws ec2 delete-volume --region $AWS_DEFAULT_REGION --volume-id $VOLUME
done
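# Optional check: the count of unattached volumes should be 0 once the loop
# above has finished.
aws ec2 describe-volumes \
--region $AWS_DEFAULT_REGION \
--filters Name=status,Values=available \
--query "length(Volumes)"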