Skip to content

Instantly share code, notes, and snippets.

@Hammond95
Created April 28, 2022 09:23
Show Gist options
  • Save Hammond95/fb005ac77c368d95cfc5689ed4a662b5 to your computer and use it in GitHub Desktop.
preBootstrapCommands: &preboot
- "export DOCKER_SECRET='my-docker-secret'"
- "export DISK_WITH_RAID=''"
- "export DISK_MOUNT=''"
- |
#!/bin/bash -x
# Writing Docker Secret
if [[ ! -z "${DOCKER_SECRET}" ]]; then
secret=$(aws --region $AWS_REGION secretsmanager get-secret-value --secret-id ${DOCKER_SECRET} --query SecretString --output text)
cat << EOF > /var/lib/kubelet/config.json
{
"auths": {
"https://index.docker.io/v1/": $secret
}
}
EOF
fi
# Disk Mount
if [[ ! -z "${DISK_MOUNT}" ]]; then
echo "Mounting SSD on /mnt/disks/ssd/"
mkdir -p /mnt/disks/ssd/
mkfs -t ext4 /dev/nvme1n1
mount /dev/nvme1n1 /mnt/disks/ssd/
echo "/dev/nvme1n1 /mnt/disks/ssd/ ext4 defaults,nofail 0 2" >> /etc/fstab
chown -R 1000:1000 /mnt/disks/ssd/
set -o xtrace
fi
# Disk with RAID
if [[ ! -z "${DISK_WITH_RAID}" ]]; then
echo "Mounting RAID-0 volume on /var/lib/kubelet/pods"
yum install -y mdadm.x86_64
mkdir -p /var/lib/kubelet/pods
disks=()
for ssd in /dev/nvme[1-9]*n*
do
disks+=("$ssd")
done
yes | mdadm --create --verbose /dev/md0 --level=0 --name=raid_0_volume --raid-devices=\${#disks[@]} \${disks[@]}
mkfs -t ext4 /dev/md0
mount /dev/md0 /var/lib/kubelet/pods
echo "/dev/md0 /var/lib/kubelet/pods ext4 defaults,nofail 0 2" >> /etc/fstab
set -o xtrace
fi
# Docker Daemon
# -------------------------------------
cat > /etc/docker/daemon.json <<EOL
{
"bridge": "none",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "10"
},
"live-restore": true,
"max-concurrent-downloads": 10,
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Soft": 65536,
"Hard": 65536
}
}
}
EOL
echo "Overridden docker daemon.json"
cat /etc/docker/daemon.json
systemctl restart docker
set -o xtrace
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
name: sandbox
region: eu-west-1
version: '1.21'
tags:
environment: sandbox
vpc:
id: "vpc-*************"
subnets:
private:
private-zone-a:
id: "subnet-***********"
cidr: "x.x.x.x/20"
az: "eu-west-1a"
private-zone-b:
id: "subnet-***********"
cidr: "x.x.x.x/20"
az: "eu-west-1b"
private-zone-c:
id: "subnet-***********"
cidr: "x.x.x.x/20/20"
az: "eu-west-1c"
privateCluster:
enabled: true
# Internal Endpoints are defined within the VPC IaaC
skipEndpointCreation: true
#addons:
# - name: vpc-cni
# version: latest
# attachPolicyARNs:
# - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
# - name: coredns
# version: latest
# - name: kube-proxy
# version: latest
nodeGroups:
- name: eks-sandbox-ng-spot-private-zone-a
#launchTemplate:
# id: "lt-*********"
# version: "1"
privateNetworking: true
subnets:
- private-zone-a
minSize: 1
maxSize: 10
volumeSize: 100
volumeType: gp2
# 1.21.2
ami: ami-0496007bb2c2b397c
ssh:
# use existing EC2 key
allow: true
publicKeyName: "test-sandbox"
instancesDistribution:
maxPrice: 1.2
instanceTypes: ["m5.xlarge","m5.2xlarge","c5.xlarge","c5.2xlarge","m4.xlarge","m4.2xlarge"]
onDemandBaseCapacity: 0
onDemandPercentageAboveBaseCapacity: 0
spotAllocationStrategy: capacity-optimized
updateConfig:
#maxUnavailable: 3
maxUnavailablePercentage: 75
tags:
k8s.io/cluster-autoscaler/node-template/label/role: "eks-sandbox-ng-spot-private-zone-a"
spotWorker: "true"
# https://github.com/weaveworks/eksctl/blob/506e8da517e8c5f020551102436ad7df4eea06cf/pkg/cfn/builder/nodegroup.go#L270-L279
propagateASGTags: true
labels:
spotWorker: "true"
role: "eks-sandbox-ng-spot-private-zone-a"
asgMetricsCollection:
- granularity: 1Minute
metrics:
- GroupMinSize
- GroupMaxSize
- GroupDesiredCapacity
- GroupInServiceInstances
- GroupPendingInstances
- GroupStandbyInstances
- GroupTerminatingInstances
- GroupTotalInstances
# https://github.com/weaveworks/eksctl/blob/7c7c0924080542b6558bd9bae281286e4e118c05/pkg/nodebootstrap/userdata.go#L134-L152
# EKSCTL parses yaml as JSON and applies KVs based on
# kubelet apis defined here:
# https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/
kubeletExtraConfig:
containerLogMaxSize: '100Mi'
podPidsLimit: 2048
#maxPods: 110 #default
<<: *preboot
overrideBootstrapCommand: |
#!/bin/bash
/etc/eks/bootstrap.sh sandbox
#--kubelet-extra-args
#"--node-labels=spot-worker=true,role=eks-sandbox-ng-spot-private-zone-a"
cloudWatch:
clusterLogging:
enableTypes: ["audit", "authenticator"]
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment