dev kube-aws setup (@cknowles, last active March 13, 2018)
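# kube-aws cluster.yaml for a development cluster in eu-west-1: one
# controller, a single etcd node, three per-AZ on-demand worker pools, one
# spot-fleet pool, and a docker-healthcheck systemd unit shared via YAML anchors.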
clusterName: k8s-dev-v8
releaseChannel: stable
sshAccessAllowedSourceCIDRs: []
adminAPIEndpointName: versioned
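# Two API endpoints are exposed: "versioned" (kubeapi-dev-v8) gets a
# kube-aws-managed load balancer and Route53 record, while "unversioned"
# (kubeapi-dev) rides on an externally managed load balancer, presumably as a
# stable name that survives cluster version bumps (the v8 in clusterName).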
apiEndpoints:
- name: versioned
  dnsName: kubeapi-dev-v8.OBFUSCATED
  loadBalancer:
    createRecordSet: true
    hostedZone:
      id: OBFUSCATED
    recordSetTTL: 60
- name: unversioned
  dnsName: kubeapi-dev.OBFUSCATED
  loadBalancer:
    managed: false
keyName: OBFUSCATED
region: eu-west-1
kmsKeyArn: "arn:aws:kms:eu-west-1:OBFUSCATED:key/OBFUSCATED"
controller:
  autoScalingGroup:
    minSize: 1
    maxSize: 1
    rollingUpdateMinInstancesInService: 0
  createTimeout: PT20M
  iam:
    policy:
      statements:
      - effect: Allow
        actions:
        - route53:ListHostedZones
        - route53:ListResourceRecordSets
        resources:
        - "*"
      - effect: Allow
        actions:
        - route53:ChangeResourceRecordSets
        resources:
        - arn:aws:route53:::hostedzone/OBFUSCATED
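      # The Route53 permissions above let controller nodes manage records in
      # the OBFUSCATED hosted zone, e.g. for a DNS controller such as
      # external-dns (an assumption; the consumer isn't named in this file).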
  instanceTags:
    Role: controller
    Type: ondemand
  instanceType: m3.medium
  subnets:
  - name: ManagedPrivateSubnetWithExistingNGW1
  - name: ManagedPrivateSubnetWithExistingNGW2
  - name: ManagedPrivateSubnetWithExistingNGW3
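  # The anchors below (&customSystemdUnits, &customFiles) are aliased into
  # every worker node pool, so the docker-healthcheck unit, timer, and script
  # land on controllers and workers alike.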
  customSystemdUnits: &customSystemdUnits
  - name: docker-healthcheck.service
    command: start
    enable: true
    content: |
      [Unit]
      Description=Run docker-healthcheck once
      After=docker.service
      [Service]
      Type=oneshot
      ExecStart=/opt/bin/docker-healthcheck
      [Install]
      WantedBy=multi-user.target
  - name: docker-healthcheck.timer
    command: start
    enable: true
    content: |
      [Unit]
      Description=Trigger docker-healthcheck periodically
      After=docker.service
      [Timer]
      OnUnitInactiveSec=30s
      Unit=docker-healthcheck.service
      [Install]
      WantedBy=multi-user.target
  customFiles: &customFiles
  - path: "/opt/bin/docker-healthcheck"
    owner: root:root
    permissions: 0700
    content: |
      #!/bin/bash
      # Probe the Docker daemon; a hung daemon hangs `docker ps` too, so cap
      # the probe at 10 seconds.
      if timeout 10 docker ps > /dev/null; then
        exit 0
      fi
      echo "docker failed"
      echo "Giving docker 30 seconds grace before restarting"
      sleep 30
      if timeout 10 docker ps > /dev/null; then
        echo "docker recovered"
        exit 0
      fi
      echo "docker still down; triggering docker restart"
      systemctl restart containerd docker
      echo "Waiting 60 seconds to give docker time to start"
      sleep 60
      if timeout 10 docker ps > /dev/null; then
        echo "docker recovered"
        exit 0
      fi
      echo "docker still failing"
      # Exit non-zero so the failure shows up in the unit's status.
      exit 1
worker:
  apiEndpointName: versioned
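  # One pool (and thus one ASG) per availability zone below, a common
  # cluster-autoscaler pattern: the autoscaler cannot pick a zone inside a
  # multi-AZ ASG, so zone-pinned pools let it scale each AZ independently.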
  nodePools:
  - name: pool-A
    autoscaling:
      clusterAutoscaler:
        enabled: true
    autoScalingGroup:
      minSize: 1
      maxSize: 3
      rollingUpdateMinInstancesInService: 1
    availabilityZone: eu-west-1a
    awsNodeLabels:
      enabled: true
    customSystemdUnits: *customSystemdUnits
    customFiles: *customFiles
    instanceType: t2.medium
    kube2IamSupport:
      enabled: true
    nodeDrainer:
      enabled: true
    securityGroupIds:
    - sg-OBFUSCATED
    stackTags:
      Role: worker
      Type: ondemand
    subnets:
    - name: ManagedPrivateSubnetWithExistingNGW1
    waitSignal:
      enabled: true
  - name: pool-B
    autoscaling:
      clusterAutoscaler:
        enabled: true
    autoScalingGroup:
      minSize: 0
      maxSize: 3
      rollingUpdateMinInstancesInService: 1
    availabilityZone: eu-west-1b
    awsNodeLabels:
      enabled: true
    customSystemdUnits: *customSystemdUnits
    customFiles: *customFiles
    instanceType: t2.medium
    kube2IamSupport:
      enabled: true
    nodeDrainer:
      enabled: true
    securityGroupIds:
    - sg-OBFUSCATED
    stackTags:
      Role: worker
      Type: ondemand
    subnets:
    - name: ManagedPrivateSubnetWithExistingNGW2
    waitSignal:
      enabled: true
  - name: pool-C
    autoscaling:
      clusterAutoscaler:
        enabled: true
    autoScalingGroup:
      minSize: 0
      maxSize: 3
      rollingUpdateMinInstancesInService: 1
    availabilityZone: eu-west-1c
    awsNodeLabels:
      enabled: true
    customSystemdUnits: *customSystemdUnits
    customFiles: *customFiles
    instanceType: t2.medium
    kube2IamSupport:
      enabled: true
    nodeDrainer:
      enabled: true
    securityGroupIds:
    - sg-OBFUSCATED
    stackTags:
      Role: worker
      Type: ondemand
    subnets:
    - name: ManagedPrivateSubnetWithExistingNGW3
    waitSignal:
      enabled: true
  - name: pool-S
    autoscaling:
      clusterAutoscaler:
        enabled: false
    awsNodeLabels:
      enabled: true
    customSystemdUnits: *customSystemdUnits
    customFiles: *customFiles
    instanceTags:
      Cluster: development
      Role: worker
    kube2IamSupport:
      enabled: true
    nodeDrainer:
      enabled: true
    securityGroupIds:
    - sg-OBFUSCATED
    spotFleet:
      targetCapacity: 2
      spotPrice: 0.036
      launchSpecifications:
      - weightedCapacity: 1
        instanceType: c4.large
        spotPrice: 0.036
      - weightedCapacity: 2
        instanceType: c4.xlarge
        spotPrice: 0.036
      - weightedCapacity: 4
        instanceType: c4.2xlarge
        spotPrice: 0.036
      - weightedCapacity: 1
        instanceType: m4.large
        spotPrice: 0.036
      - weightedCapacity: 2
        instanceType: m4.xlarge
        spotPrice: 0.036
      - weightedCapacity: 4
        instanceType: m4.2xlarge
        spotPrice: 0.036
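      # targetCapacity is counted in weighted units (large = 1, xlarge = 2,
      # 2xlarge = 4), so a target of 2 is satisfied by e.g. two large or one
      # xlarge instance; per EC2 spot fleet semantics, spotPrice caps the bid
      # per weighted unit-hour.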
    stackTags:
      Role: worker
      Type: spot
    subnets:
    - name: ManagedPrivateSubnetWithExistingNGW1
    - name: ManagedPrivateSubnetWithExistingNGW2
    - name: ManagedPrivateSubnetWithExistingNGW3
    waitSignal:
      enabled: true
etcd:
  count: 1
  instanceTags:
    Role: etcd
    Type: ondemand
  instanceType: m3.medium
  subnets:
  - name: ManagedPrivateSubnetWithExistingNGW1
  - name: ManagedPrivateSubnetWithExistingNGW2
  - name: ManagedPrivateSubnetWithExistingNGW3
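# count: 1 keeps the dev cluster cheap, but a single etcd member has no
# quorum redundancy; losing the node means restoring cluster state from backup.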
vpc:
  id: vpc-OBFUSCATED
internetGateway:
  id: igw-OBFUSCATED
subnets:
- name: ManagedPrivateSubnetWithExistingNGW1
  private: true
  availabilityZone: eu-west-1a
  instanceCIDR: "10.0.30.0/24"
  natGateway:
    idFromStackOutput: vpc-nat-gateway-1
- name: ManagedPrivateSubnetWithExistingNGW2
  private: true
  availabilityZone: eu-west-1b
  instanceCIDR: "10.0.31.0/24"
  natGateway:
    idFromStackOutput: vpc-nat-gateway-2
- name: ManagedPrivateSubnetWithExistingNGW3
  private: true
  availabilityZone: eu-west-1c
  instanceCIDR: "10.0.32.0/24"
  natGateway:
    idFromStackOutput: vpc-nat-gateway-3
- name: ManagedPublicSubnet1
  private: false
  availabilityZone: eu-west-1a
  instanceCIDR: "10.0.33.0/24"
- name: ManagedPublicSubnet2
  private: false
  availabilityZone: eu-west-1b
  instanceCIDR: "10.0.34.0/24"
- name: ManagedPublicSubnet3
  private: false
  availabilityZone: eu-west-1c
  instanceCIDR: "10.0.35.0/24"
serviceCIDR: "10.3.0.0/16"
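# Instance subnets use 10.0.30.0/24 through 10.0.35.0/24, while
# cluster-internal service IPs are allocated from 10.3.0.0/16; the two ranges
# must not overlap.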
kubernetesDashboard:
  adminPrivileges: false
  insecureLogin: true
cloudWatchLogging:
  enabled: true
  retentionInDays: 7
kubeDns:
  nodeLocalResolver: true
kubeProxy:
  ipvsMode:
    enabled: false
    scheduler: rr
    syncPeriod: 300s
    minSyncPeriod: 60s
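# ipvsMode is off, so kube-proxy runs in its default iptables mode; the
# scheduler and sync periods above only take effect once IPVS is enabled.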
addons:
  clusterAutoscaler:
    enabled: true
  rescheduler:
    enabled: true
  metricsServer:
    enabled: false
  prometheus:
    securityGroupsEnabled: false
experimental:
  admission:
    podSecurityPolicy:
      enabled: false
    alwaysPullImages:
      enabled: false
    denyEscalatingExec:
      enabled: false
    initializers:
      enabled: true
    priority:
      enabled: false
  awsEnvironment:
    enabled: false
    environment:
      CFNSTACK: '{ "Ref" : "AWS::StackId" }'
  auditLog:
    enabled: true
    maxage: 30
    logpath: /var/log/kube-apiserver-audit.log
  authentication:
    webhook:
      enabled: false
      cacheTTL: 1m0s
      configBase64: base64-encoded-webhook-yaml
  awsNodeLabels:
    enabled: false
  clusterAutoscalerSupport: # default value is not documented by kube-aws in cluster.yaml
    enabled: true
  tlsBootstrap:
    enabled: false
  nodeAuthorizer:
    enabled: false
  ephemeralImageStorage:
    enabled: false
  kube2IamSupport:
    enabled: true
  nodeDrainer:
    enabled: true
    drainTimeout: 5
    iamRole:
      arn: ""
  oidc:
    enabled: true
    issuerUrl: "https://accounts.google.com"
    clientId: OBFUSCATED.apps.googleusercontent.com
    usernameClaim: "email"
    groupsClaim: "groups"
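  # OIDC against Google's issuer maps the token's email claim to the
  # Kubernetes username. Google ID tokens don't carry a "groups" claim out of
  # the box, so group mapping likely relies on extra tooling (an assumption;
  # not stated in this file).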
  disableSecurityGroupIngress: false
  kubelet:
    RotateCerts:
      enabled: true
stackTags:
  Cluster: development
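# A minimal workflow sketch for applying edits to this file (assumes kube-aws
# is installed and AWS credentials are configured; commands not part of this gist):
#   kube-aws validate   # check cluster.yaml and the rendered stack templates
#   kube-aws update     # apply the updated CloudFormation stacks to the cluster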