# EKS custom networking with the AWS VPC CNI plugin:
# Terraform that carves pod subnets out of a secondary CIDR (100.64.0.0/x),
# creates the EKS cluster, and installs the aws-vpc-cni Helm chart with
# per-AZ ENIConfigs, followed by the helper scripts annotate-nodes.sh
# (Helm adoption of the existing aws-node resources) and cycle-nodes.sh
# (rolling replacement of the worker nodes).
########################################################################################
#                                                                                      #
#   Create a new subnet in az-1a and associate it with the az-1a route table           #
#                                                                                      #
########################################################################################
resource "aws_subnet" "extra_az_1a" {
  vpc_id            = var.eks_vpc_id
  cidr_block        = "100.64.0.0/19"
  availability_zone = "eu-west-1a"

  # The secondary CIDR block must be attached to the VPC before a subnet
  # can be carved out of it.
  depends_on = [aws_vpc_ipv4_cidr_block_association.secondary_cidr]
}

resource "aws_route_table_association" "a" {
  subnet_id      = aws_subnet.extra_az_1a.id
  route_table_id = var.az_1a_route_table_id
}
########################################################################################
#                                                                                      #
#   Create a new subnet in az-1b and associate it with the az-1b route table           #
#                                                                                      #
########################################################################################
resource "aws_subnet" "extra_az_1b" {
  vpc_id            = var.eks_vpc_id
  cidr_block        = "100.64.32.0/19"
  availability_zone = "eu-west-1b"

  # The secondary CIDR block must be attached to the VPC before a subnet
  # can be carved out of it.
  depends_on = [aws_vpc_ipv4_cidr_block_association.secondary_cidr]
}

resource "aws_route_table_association" "b" {
  subnet_id      = aws_subnet.extra_az_1b.id
  route_table_id = var.az_1b_route_table_id
}
########################################################################################
#                                                                                      #
#   Create a new subnet in az-1c and associate it with the az-1c route table           #
#                                                                                      #
########################################################################################
resource "aws_subnet" "extra_az_1c" {
  vpc_id            = var.eks_vpc_id
  cidr_block        = "100.64.64.0/19"
  availability_zone = "eu-west-1c"

  # The secondary CIDR block must be attached to the VPC before a subnet
  # can be carved out of it.
  depends_on = [aws_vpc_ipv4_cidr_block_association.secondary_cidr]
}

resource "aws_route_table_association" "c" {
  subnet_id      = aws_subnet.extra_az_1c.id
  route_table_id = var.az_1c_route_table_id
}
########################################################################################
#                                                                                      #
#   🚀 Create EKS cluster 🚀                                                           #
#                                                                                      #
########################################################################################
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "v17.20.0"

  cluster_name    = "dev-cluster"
  cluster_version = "1.20"
  subnets         = [var.az_1a_subnet_id, var.az_1b_subnet_id, var.az_1c_subnet_id]
  vpc_id          = var.eks_vpc_id

  # Private-only API endpoint, reachable from the enterprise network range.
  cluster_endpoint_private_access                = true
  cluster_create_endpoint_private_access_sg_rule = true
  cluster_endpoint_private_access_cidrs          = ["10.0.0.0/8"] // Your enterprise CIDR range that should be allowed access to the k8s API

  node_groups_defaults = {
    ami_type  = "AL2_x86_64"
    disk_size = 50
  }

  // Here we're using a managed EKS node group
  node_groups = {
    example = {
      desired_capacity = 2
      max_capacity     = 2
      min_capacity     = 2
      instance_types   = ["t3.medium"]

      update_config = {
        max_unavailable_percentage = 50 # or set `max_unavailable`
      }
    }
  }

  # The 100.64.x pod subnets must exist and be routable before the cluster
  # comes up, so the CNI can later place pod ENIs in them.
  depends_on = [
    aws_subnet.extra_az_1a,
    aws_subnet.extra_az_1b,
    aws_subnet.extra_az_1c,
    aws_route_table_association.a,
    aws_route_table_association.b,
    aws_route_table_association.c,
  ]
}
########################################################################################
#                                                                                      #
#   Annotate nodes                                                                     #
#   This will prepare the installation of AWS VPC CNI                                  #
#                                                                                      #
########################################################################################
resource "null_resource" "annotate_nodes" {
  # Re-run whenever the script itself changes.
  triggers = {
    "sha256" = filesha256("./annotate-nodes.sh")
  }

  provisioner "local-exec" {
    command = <<EOH
chmod 0755 annotate-nodes.sh
./annotate-nodes.sh
EOH
  }

  # The script talks to the cluster's API server, so the cluster must exist.
  depends_on = [module.eks]
}
########################################################################################
#                                                                                      #
#   Deploy AWS VPC CNI                                                                 #
#                                                                                      #
########################################################################################
resource "helm_release" "vpc_cni" {
  name       = "aws-vpc-cni"
  namespace  = "kube-system"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-vpc-cni"
  # NOTE(review): no chart `version` is pinned, so every apply pulls the latest
  # chart release — consider pinning for reproducible deploys.

  # One ENIConfig per AZ pointing at the 100.64.x subnets, so pods draw IPs
  # from the secondary CIDR while the nodes stay in the primary subnets.
  # BUG FIX: the YAML below had lost its nesting (all keys were flattened to
  # column 0, which is not valid chart values); indentation restored.
  values = [
    <<EOT
crd:
  create: false # These were already created by the EKS module
originalMatchLabels: true
eniConfig:
  create: true
  region: eu-west-1
  subnets:
    a:
      id: ${aws_subnet.extra_az_1a.id}
      securityGroups:
        - ${module.eks.worker_security_group_id}
    b:
      id: ${aws_subnet.extra_az_1b.id}
      securityGroups:
        - ${module.eks.worker_security_group_id}
    c:
      id: ${aws_subnet.extra_az_1c.id}
      securityGroups:
        - ${module.eks.worker_security_group_id}
env:
  AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: true
  ENI_CONFIG_LABEL_DEF: "failure-domain.beta.kubernetes.io/zone"
EOT
  ]

  # The aws-node resources must be annotated for Helm adoption first.
  depends_on = [null_resource.annotate_nodes]
}
########################################################################################
#                                                                                      #
#   Cycle nodes                                                                        #
#   This is needed to ensure all new pods will be put on the custom ENI configs        #
#                                                                                      #
########################################################################################
resource "null_resource" "cycle_nodes" {
  # Re-run whenever the script itself changes.
  triggers = {
    "sha256" = filesha256("./cycle-nodes.sh")
  }

  provisioner "local-exec" {
    command = <<EOH
chmod 0755 cycle-nodes.sh
./cycle-nodes.sh -c dev-cluster
EOH
  }

  # Nodes are only cycled once the new CNI configuration is in place.
  depends_on = [helm_release.vpc_cni]
}
### annotate-nodes.sh
#!/bin/bash
# Stamp the pre-existing aws-node resources with Helm ownership metadata so
# the aws-vpc-cni chart can adopt them instead of failing on "exists" errors.
set -euo pipefail

for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do
  echo "setting annotations and labels on $kind/aws-node"
  # Both release annotations can be applied in a single kubectl call.
  kubectl -n kube-system annotate --overwrite "$kind" aws-node \
    meta.helm.sh/release-name=aws-vpc-cni \
    meta.helm.sh/release-namespace=kube-system
  kubectl -n kube-system label --overwrite "$kind" aws-node \
    app.kubernetes.io/managed-by=Helm
done
### cycle-nodes.sh
#!/bin/bash
# Terminate every EC2 worker node of an EKS cluster one at a time, waiting
# after each termination until the cluster is back to the expected number of
# Ready nodes before moving on to the next instance.
#
# Usage: cycle-nodes.sh -c <cluster-name> [-a <assume-role-arn>]
set -euo pipefail

usage() { echo "Usage: $0 [-c <cluster-name>] [-a <assume-role>]" 1>&2; exit 1; }

CLUSTER=""
ASSUME_ROLE_ARN=""
while getopts ":c:a:" o; do
  case "${o}" in
    c) CLUSTER=${OPTARG} ;;
    a) ASSUME_ROLE_ARN=${OPTARG} ;;
    *) usage ;;
  esac
done
shift $((OPTIND - 1))

[ -n "${CLUSTER}" ] || usage

# Optionally assume a role and install the temporary credentials.
if [ -n "${ASSUME_ROLE_ARN}" ]; then
  temp_role=$(aws sts assume-role \
    --role-arn "${ASSUME_ROLE_ARN}" \
    --role-session-name temp)
  AWS_ACCESS_KEY_ID=$(printf '%s' "${temp_role}" | jq -r .Credentials.AccessKeyId)
  AWS_SECRET_ACCESS_KEY=$(printf '%s' "${temp_role}" | jq -r .Credentials.SecretAccessKey)
  AWS_SESSION_TOKEN=$(printf '%s' "${temp_role}" | jq -r .Credentials.SessionToken)
  export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
  aws configure set aws_access_key_id "${AWS_ACCESS_KEY_ID}"
  aws configure set aws_secret_access_key "${AWS_SECRET_ACCESS_KEY}"
  aws configure set aws_session_token "${AWS_SESSION_TOKEN}"
fi

# Get a list of all the instances in the node group.
# (Replaces the original printf-into-backticks-into-eval construction.)
mapfile -t INSTANCE_IDS < <(aws ec2 describe-instances \
  --query 'Reservations[*].Instances[*].InstanceId' \
  --filters "Name=tag-key,Values=eks:cluster-name" "Name=tag-value,Values=${CLUSTER}" \
  --output text | tr '\t' '\n')

# Number of Ready nodes we must get back to after each termination.
# BUG FIX: the original used `grep Read`, which also matches "NotReady",
# so NotReady nodes inflated the target and the wait loop could never finish.
target=$(kubectl get nodes | grep -v NotReady | grep -c Read || true)

# Iterate through nodes - terminate one at a time.
for i in "${INSTANCE_IDS[@]}"; do
  curr=0
  echo "Terminating EC2 instance $i ... "
  aws ec2 terminate-instances --instance-ids "$i" \
    | jq -r '.TerminatingInstances[0].CurrentState.Name'

  # Wait for the old instance to die, then for the replacement to be Ready.
  while [ "$curr" -ne "$target" ]; do
    stat=$(aws ec2 describe-instance-status --instance-ids "$i" --include-all-instances \
      | jq -r '.InstanceStatuses[0].InstanceState.Name')
    if [ "$stat" = "terminated" ]; then
      sleep 15
      curr=$(kubectl get nodes | grep -v NotReady | grep -c Read || true)
      kubectl get nodes
      echo "Current Ready nodes = $curr of $target"
    else
      sleep 10
      echo "$i $stat"
    fi
  done
done
echo "done"
### end of file