@cmcconnell1
Created September 14, 2018 18:50
cluster.yaml for kube-aws v0.10.2 (new/latest version) that produces a kube2iam CrashLoopBackOff with the error: level=fatal msg="route ip+net: no such network interface"

clusterName: opsinfra
s3URI: s3://my-bucket-kube-aws-us-west-1/
releaseChannel: stable
amiId: "ami-0a86d340ea7fde077"
disableContainerLinuxAutomaticUpdates: true
apiEndpoints:
- # The unique name of this API endpoint used to identify it inside CloudFormation stacks
  name: default
  dnsName: opsinfra.myfoo.com
  loadBalancer:
    subnets:
    - name: opsinfra-igw-public-1a
    - name: opsinfra-igw-public-1b
    private: false
    type: classic
    recordSetTTL: 300
    hostedZone:
      id: "XXXXXXXXXX"
    recordSetManaged: true
keyName: td-aws-ops
region: us-west-1
kmsKeyArn: "arn:aws:kms:us-west-1:12312312312:key/sasdfasdf-asdfasdfasd-asdfasdf9bf7a5452"
controller:
  createTimeout: PT15M
  instanceType: m4.large
  instanceTags:
    instanceRole: controller
  rootVolume:
    size: 50
    type: gp2
  autoScalingGroup:
    minSize: 2
    maxSize: 7
    rollingUpdateMinInstancesInService: 3
  securityGroupIds:
  - sg-myprodsg # PROD
  - sg-myprodssh # FOO-SSH
  iam:
    role:
      name: "opsinfraControllerMR"
  subnets:
  - name: opsinfra-igw-public-1a
  - name: opsinfra-igw-public-1b
  nodeLabels:
    kube-aws.coreos.com/role: controller
worker:
  nodePools:
  - name: nodepool-1a
    instanceType: m4.large
    rootVolume:
      size: 500
      type: gp2
    subnets:
    - name: opsinfra-igw-public-1a
    securityGroupIds:
    - sg-myprodsg # PROD
    - sg-myprodssh # FOO-SSH
    iam:
      role:
        name: "opsinfraWorkerMR-1A"
    autoscaling:
      clusterAutoscaler:
        enabled: true
    autoScalingGroup:
      minSize: 1
      maxSize: 9
      rollingUpdateMinInstancesInService: 3
    awsEnvironment:
      enabled: true
      environment:
        CFNSTACK: '{ "Ref" : "AWS::StackId" }'
    awsNodeLabels:
      enabled: true
    clusterAutoscalerSupport:
      enabled: true
    kube2IamSupport:
      enabled: true
    nodeLabels:
      kube-aws.coreos.com/role: worker-1a
  - name: nodepool-1b
    instanceType: m4.large
    rootVolume:
      size: 500
      type: gp2
    subnets:
    - name: opsinfra-igw-public-1b
    securityGroupIds:
    - sg-myprodsg # PROD
    - sg-myprodssh # FOO-SSH
    iam:
      role:
        name: "opsinfraWorkerMR-1B"
    autoscaling:
      clusterAutoscaler:
        enabled: true
    autoScalingGroup:
      minSize: 1
      maxSize: 9
      rollingUpdateMinInstancesInService: 3
    awsEnvironment:
      enabled: true
      environment:
        CFNSTACK: '{ "Ref" : "AWS::StackId" }'
    awsNodeLabels:
      enabled: true
    clusterAutoscalerSupport:
      enabled: true
    kube2IamSupport:
      enabled: true
    nodeLabels:
      kube-aws.coreos.com/role: worker-1b
etcd:
  count: 3
  instanceType: t2.medium
  rootVolume:
    size: 50
    type: gp2
  dataVolume:
    size: 50
    type: gp2
    encrypted: true
  subnets:
  - name: opsinfra-igw-public-1a
  - name: opsinfra-igw-public-1b
  snapshot:
    automated: true
  disasterRecovery:
    automated: true
  securityGroupIds:
  - sg-myprodsg # PROD
  - sg-myprodssh # FOO-SSH
vpc:
  id: vpc-mynicevpc
vpcCIDR: "10.1.0.0/16"
subnets:
- name: opsinfra-igw-public-1a
  private: false
  availabilityZone: us-west-1a
  instanceCIDR: "10.1.8.64/27"
  routeTable:
    id: "rtb-c1b87ca9"
- name: opsinfra-igw-public-1b
  private: false
  availabilityZone: us-west-1b
  instanceCIDR: "10.1.8.96/27"
  routeTable:
    id: "rtb-c1b87ca9"
serviceCIDR: "10.3.0.0/24"
podCIDR: "10.2.0.0/16"
dnsServiceIP: 10.3.0.10
tlsCADurationDays: 3650
tlsCertDurationDays: 365
kubeResourcesAutosave:
  enabled: true
kubernetes:
  encryptionAtRest:
    enabled: false
  networking:
    selfHosting:
      enabled: true # false will fall back to legacy coreos flannel/etcd2 installation
      type: canal # either "canal" or "flannel"
      typha: false # enable for type 'canal' for 50+ node clusters
kubernetesDashboard:
  adminPrivileges: false
  insecureLogin: false
  enabled: true
kubeDns:
  autoscaler:
    coresPerReplica: 256
    nodesPerReplica: 16
    min: 2
kubeProxy:
  ipvsMode:
    enabled: false
    scheduler: rr
    syncPeriod: 300s
    minSyncPeriod: 60s
addons:
  clusterAutoscaler:
    enabled: true
  rescheduler:
    enabled: false
  metricsServer:
    enabled: false
  prometheus:
    securityGroupsEnabled: true
experimental:
  admission:
    podSecurityPolicy:
      enabled: false
    alwaysPullImages:
      enabled: false
    denyEscalatingExec:
      enabled: false
    initializers:
      enabled: false
    priority:
      enabled: false
    mutatingAdmissionWebhook:
      enabled: false
    validatingAdmissionWebhook:
      enabled: false
    OwnerReferencesPermissionEnforcement:
      enabled: false
    persistentVolumeClaimResize:
      enabled: false
  awsEnvironment:
    enabled: false
    environment:
      CFNSTACK: '{ "Ref" : "AWS::StackId" }'
  auditLog:
    enabled: true
    maxage: 30
    logpath: /dev/stdout
  authentication:
    webhook:
      enabled: false
      cacheTTL: 1m0s
      configBase64: base64-encoded-webhook-yaml
  awsNodeLabels:
    enabled: true
  tlsBootstrap:
    enabled: true
  nodeAuthorizer:
    enabled: true
  ephemeralImageStorage:
    enabled: false
  kube2IamSupport:
    enabled: true
  nodeDrainer:
    enabled: false
    drainTimeout: 5
    iamRole:
      arn: ""
  oidc:
    enabled: false
    issuerUrl: "https://accounts.google.com"
    clientId: "kubernetes"
    usernameClaim: "email"
    groupsClaim: "groups"
  disableSecurityGroupIngress: false
  nodeMonitorGracePeriod: "40s"
  kubelet:
    RotateCerts:
      enabled: true
stackTags:
Name: "Kubernetes"
Environment: "PROD"
Function: "opsinfra"
Owner: "Ops"
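
For reference, the kube2iam crash in the title typically means the interface named by kube2iam's --host-interface flag does not exist on the node (the message is Go's standard error for a failed network-interface lookup). The kube2IamSupport flags above only wire up the IAM side in kube-aws; the kube2iam DaemonSet itself is deployed separately, and because this cluster runs canal, pod traffic goes through Calico-managed cali* interfaces, so --host-interface likely needs the wildcard cali+ rather than a fixed name such as docker0. A minimal sketch of such a DaemonSet, with the namespace, labels, and image tag assumed:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube2iam
  namespace: kube-system # assumed
spec:
  selector:
    matchLabels:
      name: kube2iam
  template:
    metadata:
      labels:
        name: kube2iam
    spec:
      hostNetwork: true # kube2iam must see the node's interfaces and iptables
      containers:
      - name: kube2iam
        image: jtblin/kube2iam:0.10.0 # assumed tag
        args:
        - --iptables=true
        - --host-ip=$(HOST_IP)
        - --host-interface=cali+ # "cali+" matches all Calico-managed interfaces under canal
        - --node=$(NODE_NAME)
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP # equals the node IP when hostNetwork is true
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true # needed for --iptables to insert the metadata redirect rule

With plain flannel instead of canal, the container bridge (commonly cni0) would be the interface to name instead.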
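
Once kube2iam runs, workloads select a role per pod through the iam.amazonaws.com/role annotation; the node roles above (opsinfraWorkerMR-1A/-1B) need sts:AssumeRole on the target role, which is what enabling kube2IamSupport is meant to arrange. A hypothetical smoke test, with the role name and image assumed:

apiVersion: v1
kind: Pod
metadata:
  name: kube2iam-smoke-test
  annotations:
    iam.amazonaws.com/role: arn:aws:iam::12312312312:role/my-app-role # assumed role
spec:
  restartPolicy: Never
  containers:
  - name: aws-cli
    image: amazon/aws-cli # assumed image; its entrypoint is the aws CLI
    args: ["sts", "get-caller-identity"] # should report the annotated role, not the node role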