EKS Upgrade Jenkins Pipeline Scripts
#!/usr/bin/env groovy
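// EKS worker node in-place upgrade pipeline:
//   1. scale the workers ASG up by one so a node on the new AMI joins,
//   2. taint and drain nodes still running the old kubelet version,
//   3. restore the original ASG capacity,
//   4. upgrade the VPC CNI, CoreDNS and kube-proxy add-ons.
// Assumes a Jenkins shared library that provides the Env enum and the
// awsAuthentication/awsCluster steps, and an agent with aws, kubectl and jq.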
final List<String> environments = Env.values().collect { it.name() }

pipeline {
    agent any
    parameters {
        string(name: 'currentk8sVersion', defaultValue: '', description: 'Provide the k8s major & minor version to upgrade from, e.g. 1.19')
        choice(name: 'Env', choices: environments, description: 'Environment to upgrade')
    }
    stages {
        stage('List Clusters') {
            steps {
                awsAuthentication(params.Env as Env)
                echo 'Successfully logged in to AWS'
                script {
                    String cluster = awsCluster(params.Env.trim().toLowerCase())
                    echo "${cluster}"
                    connectToEksCluster(cluster.trim(), 'us-east-1')
                }
            }
        }
        stage('Check K8s and nodes version') {
            steps {
                echo 'Checking k8s version of Cluster'
                script {
                    try {
                        sh '''
                            kubectl version --short
                            kubectl get nodes
                        '''
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
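        // eks.privileged is the default PodSecurityPolicy that EKS creates; if it
        // is missing, pods rescheduled during the node rotation can fail admission.
        // (PSP only applies to clusters below Kubernetes 1.25, where it was removed.)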
        stage('EKS privileged policy') {
            steps {
                echo 'Make sure the EKS privileged policy exists'
                script {
                    try {
                        sh 'kubectl get psp eks.privileged'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
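        // Stop the cluster autoscaler so it does not fight the manual ASG capacity
        // changes made in the next stages.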
        stage('Disable cluster autoscaler') {
            steps {
                echo 'Disable cluster autoscaler'
                script {
                    try {
                        sh 'kubectl scale deployments/cluster-autoscaler --replicas=0 -n kube-system'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
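        // Find the workers ASG by its Name tag (<cluster>-workers, derived from the
        // kubeconfig context) and raise desired capacity by one, bumping max size
        // first if desired would exceed it.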
        stage('Get autoscaling group name & increase node capacity by 1') {
            steps {
                script {
                    try {
                        sh """#!/bin/bash
                            cluster_name=`kubectl config current-context | cut -d '/' -f2`
                            echo "\${cluster_name}"
                            worker_tag=`echo "\${cluster_name}" | sed s/"-cluster"//`
                            echo "\${worker_tag}"
                            auto_scaling_group_name=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".AutoScalingGroupName --output text`
                            echo "\${auto_scaling_group_name}"
                            current_desired_capacity=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".DesiredCapacity --output text`
                            new_desired_capacity=`expr "\${current_desired_capacity}" + 1`
                            current_max_capacity=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".MaxSize --output text`
                            if [ \${new_desired_capacity} -gt \${current_max_capacity} ]
                            then
                                new_max_capacity=`expr "\${current_max_capacity}" + 1`
                                aws autoscaling update-auto-scaling-group --auto-scaling-group-name \${auto_scaling_group_name} --max-size \${new_max_capacity}
                            fi
                            aws autoscaling set-desired-capacity --auto-scaling-group-name \${auto_scaling_group_name} --desired-capacity \${new_desired_capacity}
                        """
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
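        // The replacement node launched by the scale-up should now appear in
        // `kubectl get nodes` with the upgraded kubelet version / latest AMI.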
        stage('Check if new nodes are using latest AMI') {
            steps {
                echo 'Checking if the new nodes are using the latest AMI'
                script {
                    try {
                        sh 'kubectl get nodes'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
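        // Mark every node still on the old kubelet version as NoSchedule so that
        // pods evicted during the drain can only land on the new nodes.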
        stage('Taint Nodes') {
            steps {
                script {
                    try {
                        sh """
                            echo "k8s version is ${params.currentk8sVersion}"
                            # assuming the jq binary is /tools/jq/jq-1.5/jq, its directory belongs on PATH
                            export PATH=\$PATH:/tools/jq/jq-1.5
                            nodes=`kubectl get nodes -o json | jq -r '.items[] | select(.status.nodeInfo.kubeletVersion | contains(\"v${params.currentk8sVersion}\")) | .metadata.name' | tr '\\n' ' '`
                            echo "\${nodes}"
                            for node in \${nodes}
                            do
                                echo "Tainting \$node"
                                kubectl taint nodes \$node key=value:NoSchedule
                            done
                        """
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
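        // Manual approval gate, then per old node: drain it, wait 5 minutes for
        // workloads to come up elsewhere, and terminate the instance through the
        // ASG with --no-should-decrement-desired-capacity so a replacement node
        // (on the new AMI) is launched in its place.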
        stage('Drain Nodes') {
            input {
                message 'Do you want to proceed to drain the old nodes?'
            }
            steps {
                sh """
                    echo "k8s version is ${params.currentk8sVersion}"
                    export PATH=\$PATH:/tools/jq/jq-1.5
                    nodes=`kubectl get nodes -o json | jq -r '.items[] | select(.status.nodeInfo.kubeletVersion | contains(\"v${params.currentk8sVersion}\")) | .metadata.name' | tr '\\n' ' '`
                    echo "\${nodes}"
                    for node in \${nodes}
                    do
                        echo "Draining \$node"
                        kubectl drain \$node --ignore-daemonsets --delete-emptydir-data --force
                        echo "Sleeping for 5 mins to allow pod startups in new node"
                        sleep 300
                        echo "Terminating \$node"
                        instance_id=\$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name == '\"\$node\"') | .spec.providerID' | sed 's/.*\\(i-.*\\)/\\1/')
                        aws autoscaling terminate-instance-in-auto-scaling-group --instance-id \$instance_id --no-should-decrement-desired-capacity
                    done
                """
            }
        }
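        // Sanity check: list any pod that is not in the Running phase after the
        // drains; anything listed here needs investigation before scaling down.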
        stage('Validate Pods') {
            steps {
                sh 'kubectl get pods --field-selector=status.phase!=Running --all-namespaces'
            }
        }
        stage('No Old Nodes') {
            steps {
                echo 'Checking if the old nodes have been removed completely'
                script {
                    try {
                        sh 'kubectl get nodes'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
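        // The drain loop terminated old instances without decrementing desired
        // capacity, so the ASG is still one node above its original size; revert
        // the temporary +1 on desired (and max, if it was raised) here.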
        stage('Decrease desired & maximum capacity to original values') {
            steps {
                script {
                    try {
                        sh """#!/bin/bash
                            # recompute the names here: shell variables do not persist across sh steps
                            cluster_name=`kubectl config current-context | cut -d '/' -f2`
                            worker_tag=`echo "\${cluster_name}" | sed s/"-cluster"//`
                            auto_scaling_group_name=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".AutoScalingGroupName --output text`
                            current_desired_capacity=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".DesiredCapacity --output text`
                            echo "\${current_desired_capacity}"
                            new_desired_capacity=`expr "\${current_desired_capacity}" - 1`
                            echo "\${new_desired_capacity}"
                            current_max_capacity=`aws autoscaling describe-auto-scaling-groups --query "AutoScalingGroups[? Tags[? (Key=='Name') && Value=='\${worker_tag}-workers']]".MaxSize --output text`
                            echo "\${current_max_capacity}"
                            if [ \${current_desired_capacity} -eq \${current_max_capacity} ]
                            then
                                new_max_capacity=`expr "\${current_max_capacity}" - 1`
                                echo "\${new_max_capacity}"
                                aws autoscaling update-auto-scaling-group --auto-scaling-group-name \${auto_scaling_group_name} --max-size \${new_max_capacity}
                            fi
                            aws autoscaling set-desired-capacity --auto-scaling-group-name \${auto_scaling_group_name} --desired-capacity \${new_desired_capacity}
                        """
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
        stage('Enable cluster autoscaler') {
            steps {
                echo 'Enable cluster autoscaler'
                script {
                    try {
                        sh 'kubectl scale deployments/cluster-autoscaler --replicas=1 -n kube-system'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
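        // Pull the repo containing the versioned add-on manifests referenced in
        // the next stage (addons/vpc-cni/1.11.3/aws-k8s-cni.yaml).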
        stage('Checkout SCM') {
            steps {
                checkout([$class: 'GitSCM', branches: [[name: '*/master']], userRemoteConfigs: [[url: 'https://github.com/HarshadRanganathan/terraform-aws-eks']]])
            }
        }
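        // The add-on image tags below (CNI 1.11.3, CoreDNS v1.8.4-eksbuild.1,
        // kube-proxy v1.21.14-eksbuild.2) are tied to the EKS version being
        // upgraded to; look up the recommended versions for your target release
        // in the EKS add-on upgrade documentation before reusing them.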
        stage('Upgrade VPC CNI plugin to 1.11 and verify') {
            steps {
                echo 'Update CNI plugin'
                script {
                    try {
                        sh 'kubectl apply -f addons/vpc-cni/1.11.3/aws-k8s-cni.yaml'
                        sh 'kubectl describe daemonset aws-node --namespace kube-system | grep Image | cut -d "/" -f 2'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
        stage('Upgrade CoreDNS version') {
            steps {
                echo 'Update CoreDNS version'
                script {
                    try {
                        sh 'kubectl describe pod coredns --namespace kube-system | grep Image | grep eksbuild | cut -d "/" -f 3'
                        sh 'kubectl set image --namespace kube-system deployment.apps/coredns coredns=602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v1.8.4-eksbuild.1'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
        stage('Upgrade the kube-proxy plugin') {
            steps {
                echo 'Upgrade the kube-proxy plugin'
                script {
                    try {
                        sh 'kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v1.21.14-eksbuild.2'
                    } catch (err) {
                        echo err.toString()
                    }
                }
            }
        }
    }
}
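// Updates the local kubeconfig so that subsequent kubectl calls in this build
// target the selected cluster.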
private void connectToEksCluster(String cluster, String region) {
    sh "aws eks --region ${region} update-kubeconfig --name ${cluster}"
}