########################################################################################
# #
# Create a new subnet in az-1a and associate it with the az-1a route table #
# #
########################################################################################
resource "aws_subnet" "extra_az_1a" {
vpc_id = var.eks_vpc_id
cidr_block = "100.64.0.0/19"
availability_zone = "eu-west-1a"
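The banner above also calls for the route-table association, and a 100.64.0.0/19 subnet only works if that range has first been added to the VPC as a secondary CIDR; both resources are cut off in this excerpt. A minimal sketch under those assumptions (the resource names and the aws_route_table.az_1a reference are placeholders, not part of the original gist):

# Sketch only: associate the 100.64.0.0/16 range with the VPC as a secondary CIDR
# (the subnet above cannot be created unless its range belongs to the VPC),
# then attach the new subnet to the existing az-1a route table.
resource "aws_vpc_ipv4_cidr_block_association" "secondary" {
  vpc_id     = var.eks_vpc_id
  cidr_block = "100.64.0.0/16"
}

resource "aws_route_table_association" "extra_az_1a" {
  subnet_id      = aws_subnet.extra_az_1a.id
  route_table_id = aws_route_table.az_1a.id # placeholder for however the az-1a route table is managed
}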
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "v17.20.0"
cluster_name = "dev-cluster"
cluster_version = "1.20"
subnets = ["10.24.56.128/26", "10.24.56.192/27", "10.24.56.224/27"]
vpc_id = var.eks_vpc_id
cluster_endpoint_private_access = true // In this example, we only want to allow access to the Kubernetes API from within our enterprise network
cluster_create_endpoint_private_access_sg_rule = true
cluster_endpoint_private_access_cidrs = ["10.0.0.0/8"] // Your enterprise CIDR range that should be allowed access to the k8s API
resource "null_resource" "cycle_nodes" {
triggers = {
"sha256" = filesha256("./cycle-nodes.sh")
}
provisioner "local-exec" {
command = <<EOH
chmod 0755 cycle-nodes.sh
./cycle-nodes.sh -c dev-cluster
EOH
#!/bin/bash
usage() { echo "Usage: $0 [-c <cluster-name>] [-a <assume-role>]" 1>&2; exit 1; }

while getopts ":c:a:" o; do
  case "${o}" in
    c) CLUSTER=${OPTARG} ;;
    a) ASSUME_ROLE_ARN=${OPTARG} ;;
    *) usage ;;
  esac
done

# ... (the node draining/termination logic that uses $CLUSTER and $ASSUME_ROLE_ARN is truncated in this excerpt)
apiVersion: crd.k8s.amazonaws.com/v1alpha1
kind: ENIConfig
metadata:
  name: group1-pod-netconfig
spec:
  subnet: subnet-04f960ffc8be6865c
  securityGroups:
    - sg-070d03008bda531ad
    - sg-06e5cab8e5d6f16ef
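For pods to actually land in group1-pod-netconfig's subnet, the VPC CNI must run with custom networking enabled and each worker node has to point at an ENIConfig. One way to wire that up, following the null_resource pattern used below, is to annotate the nodes with the standard k8s.amazonaws.com/eniConfig key; this resource and its use of --all are illustrative, not part of the original gist:

# Sketch only: point every worker node at the ENIConfig above via the standard
# k8s.amazonaws.com/eniConfig annotation, which the VPC CNI reads when
# AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG is enabled.
resource "null_resource" "set_eniconfig_annotation" {
  provisioner "local-exec" {
    command = <<EOH
kubectl annotate nodes --all --overwrite k8s.amazonaws.com/eniConfig=group1-pod-netconfig
EOH
  }
}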
resource "null_resource" "annotate_nodes" {
triggers = {
"sha256" = filesha256("./annotate-nodes.sh")
}
provisioner "local-exec" {
command = <<EOH
chmod 0755 annotate-nodes.sh
./annotate-nodes.sh
EOH
#!/bin/bash
set -euo pipefail

# Mark the EKS-installed aws-node objects as owned by the upcoming aws-vpc-cni Helm release,
# so the release adopts them instead of failing with an ownership conflict.
for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do
  echo "setting annotations and labels on $kind/aws-node"
  kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=aws-vpc-cni
  kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system
  kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm
done
resource "helm_release" "this" {
name = "aws-vpc-cni"
namespace = "kube-system"
repository = "https://aws.github.io/eks-charts"
chart = "aws-vpc-cni"
values = [
<<EOT
crd:
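The values above only cover CRD handling; for the ENIConfig to be used at all, the CNI also needs custom networking switched on. A hedged sketch of the extra chart values that would typically accompany this setup (the env keys come from the upstream aws-vpc-cni chart and CNI documentation, not from this gist), which could be appended to the values list of the helm_release above:

# Sketch only: extra values that enable custom networking, so pod ENIs are created
# in the subnet/security groups named by each node's ENIConfig annotation.
locals {
  vpc_cni_custom_networking_values = <<EOT
env:
  AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "true"
  ENI_CONFIG_ANNOTATION_DEF: "k8s.amazonaws.com/eniConfig"
EOT
}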