Skip to content

Instantly share code, notes, and snippets.

Created March 3, 2017 18:36
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save anonymous/051c4b78bed1551eea63789e104faf2b to your computer and use it in GitHub Desktop.
Save anonymous/051c4b78bed1551eea63789e104faf2b to your computer and use it in GitHub Desktop.
---
# Kraken-style cluster deployment config.
# NOTE(review): the source paste had all leading indentation stripped, which
# flattens every key to the document root (and creates duplicate top-level
# keys such as `etcd`). The nesting below is reconstructed from the logical
# order of the flattened lines — verify against the consuming tool's schema,
# in particular whether repos/nodes/auth/fabric/services belong under
# `deployment` (as written here) or under the cluster list item.
deployment:
  cluster:
    - name: joej
      network: 10.32.0.0/12
      dns: 10.32.0.2
      domain: cluster.local
      # reference(...) strings are resolved by the consumer against the
      # `resource:` section of this file.
      provider: reference('resource.provider', 'aws')
  # Helm chart repositories used by the services below.
  repos:
    - name: atlas
      url: http://atlas.cnct.io
    - name: stable
      url: https://kubernetes-charts.storage.googleapis.com
  nodes:
    etcd:
      # Main etcd cluster.
      - name: etcd
        clientPorts: [2379, 4001]
        clusterToken: espouse-monger-rarely
        nodepool: reference('resource.nodepool', 'etcd')
        peerPorts: [2380]
        ssl: true
        version: v3.1.0
        containerConfig: dockerconfig
      # Dedicated etcd cluster for Kubernetes events.
      - name: etcdEvents
        clientPorts: [2381]
        clusterToken: animism-training-chastity
        nodepool: reference('resource.nodepool', 'etcdEvents')
        peerPorts: [2382]
        ssl: true
        version: v3.1.0
        containerConfig: dockerconfig
    master:
      - name: master
        nodepool: reference('resource.nodepool', 'masterNodes')
        loadbalancer: cloud
        # Which etcd definitions back the main store and the events store.
        infra:
          etcd: etcd
        events:
          etcd: etcdEvents
    node:
      - name: clusterNodes
        nodepool: reference('resource.nodepool', 'clusterNodes')
      - name: specialNodes
        nodepool: reference('resource.nodepool', 'specialNodes')
  auth:
    user: admin
    # Empty value parses as null — set explicitly or inject at deploy time;
    # do not commit a real password here.
    password:
  fabric:
    provider: flannel
    options:
      containers:
        kubePolicyController:
          version: v0.5.1
          location: calico/kube-policy-controller
        etcd:
          version: v3.0.9
          location: quay.io/coreos/etcd
        calicoCni:
          version: v1.4.2
          location: calico/cni
        calicoNode:
          version: v1.0.0-rc1
          location: quay.io/calico/node
        flannel:
          version: v0.6.1
          location: quay.io/coreos/flannel
      network:
        network: 10.128.0.0/10
        subnetLen: 22
        subnetMin: 10.128.0.0
        subnetMax: 10.191.255.255
        backend:
          type: vxlan
  # Charts installed after cluster bring-up, pulled from `repos` above.
  services:
    - name: kubedns
      repo: atlas
      chart: kubedns
      version: 0.1.0
      namespace: kube-system
      values:
        # Must agree with deployment.cluster dns / domain above.
        cluster_ip: 10.32.0.2
        dns_domain: cluster.local
    - name: heapster
      repo: atlas
      chart: heapster
      version: 0.1.0
      namespace: kube-system
    - name: central-logging
      repo: atlas
      chart: central-logging
      version: 0.2.0
    - name: minio
      repo: stable
      chart: minio
      version: 0.0.4
      values:
        # NOTE(review): credentials committed in plaintext — rotate these and
        # source them from a secret store or environment instead of VCS.
        accessKey: cc38db51-14d9-4762-9fae-280ad4106ab8
        secretKey: 2cc69531-01f1-4d80-a287-724afa3a44
# Beyond here be dragons
# Concrete resources that the reference('resource.*', '<name>') strings in the
# deployment section resolve to.
# NOTE(review): indentation reconstructed from a flattened paste — verify
# nesting against the consuming tool's schema.
resource:
  containerConfig:
    - name: dockerconfig
      version: 1.12.0
      runtime: docker
  coreos:
    - name: allNodes
      version: current
      channel: stable
      # Quoted: a bare `off` is a YAML 1.1 boolean (false), not the string
      # the CoreOS reboot-strategy setting expects.
      rebootStrategy: "off"
  keypair:
    - name: krakenKey
      publickeyFile: "$HOME/.ssh/id_rsa.pub"
      privatekeyFile: "$HOME/.ssh/id_rsa"
  kubeConfig:
    - name: krakenKubeConfig
      version: v1.5.2
      hyperkubeLocation: gcr.io/google_containers/hyperkube
      containerConfig: reference('resource.containerConfig', 'dockerconfig')
      # Empty value parses as null — presumably filled in by the consumer;
      # TODO confirm.
      kubeAuth:
  provider:
    - name: aws
      type: cloudinit
      # Empty value parses as null.
      resourcePrefix:
      vpc: 10.0.0.0/16
      region: us-west-2
      configFile: "$HOME/.aws/config"
      subnet:
        - name: uwswest2a
          az: us-west-2a
          cidr: 10.0.0.0/18
        - name: uwswest2b
          az: us-west-2b
          cidr: 10.0.64.0/18
        - name: uwswest2c
          az: us-west-2c
          cidr: 10.0.128.0/17
      # protocol "-1" = all protocols (AWS ACL convention).
      egressAcl:
        - protocol: "-1"
          rule_no: 100
          action: "allow"
          cidr_block: "0.0.0.0/0"
          from_port: 0
          to_port: 0
      ingressAcl:
        - protocol: "-1"
          rule_no: 100
          action: "allow"
          cidr_block: "0.0.0.0/0"
          from_port: 0
          to_port: 0
      authentication:
        # Left null so credentials come from the file/profile below rather
        # than being committed to VCS.
        accessKey:
        accessSecret:
        credentialsFile: "$HOME/.aws/credentials"
        credentialsProfile: "default"
      # Security-group rules: SSH and HTTPS in from anywhere, all traffic out.
      ingressSecurity:
        - from_port: 22
          to_port: 22
          protocol: "TCP"
          cidr_blocks: ["0.0.0.0/0"]
        - from_port: 443
          to_port: 443
          protocol: "TCP"
          cidr_blocks: ["0.0.0.0/0"]
      egressSecurity:
        - from_port: 0
          to_port: 0
          protocol: "-1"
          cidr_blocks: ["0.0.0.0/0"]
  nodepool:
    - name: etcd
      count: 5
      mounts:
        - device: sdf
          path: /var/lib/docker
          forceFormat: true
        - device: sdg
          path: /ephemeral
          forceFormat: false
      providerConfig:
        - name: aws
          type: t2.small
          subnet: ["uwswest2a", "uwswest2b", "uwswest2c"]
          tags:
            - key: comments
              value: "Cluster etcd"
          storage:
            - type: ebs_block_device
              opts:
                device_name: sdf
                volume_type: gp2
                volume_size: 100
                delete_on_termination: true
                snapshot_id:
                encrypted: false
            - type: ebs_block_device
              opts:
                device_name: sdg
                volume_type: gp2
                volume_size: 10
                delete_on_termination: true
                snapshot_id:
                encrypted: false
      # NOTE(review): bare name here, while other pools use
      # reference('resource.keypair', ...) — confirm whether the consumer
      # accepts both forms or this should be a reference() too.
      keypair: krakenKey
      coreos: reference('resource.coreos', 'allNodes')
    - name: etcdEvents
      count: 5
      mounts:
        - device: sdf
          path: /var/lib/docker
          forceFormat: true
        - device: sdg
          path: /ephemeral
          forceFormat: false
      providerConfig:
        - name: aws
          type: t2.small
          subnet: ["uwswest2a", "uwswest2b", "uwswest2c"]
          tags:
            - key: comments
              value: "Cluster events etcd"
          storage:
            - type: ebs_block_device
              opts:
                device_name: sdf
                volume_type: gp2
                volume_size: 100
                delete_on_termination: true
                snapshot_id:
                encrypted: false
            - type: ebs_block_device
              opts:
                device_name: sdg
                volume_type: gp2
                volume_size: 10
                delete_on_termination: true
                snapshot_id:
                encrypted: false
      # NOTE(review): bare name vs reference() — see note on the etcd pool.
      keypair: krakenKey
      coreos: reference('resource.coreos', 'allNodes')
    - name: masterNodes
      count: 3
      mounts:
        - device: sdf
          path: /var/lib/docker
          forceFormat: true
      providerConfig:
        - name: aws
          type: m3.medium
          subnet: ["uwswest2a", "uwswest2b", "uwswest2c"]
          tags:
            - key: comments
              value: "Master instances"
          storage:
            - type: ebs_block_device
              opts:
                device_name: sdf
                volume_type: gp2
                volume_size: 100
                delete_on_termination: true
                snapshot_id:
                encrypted: false
      keypair: reference('resource.keypair', 'krakenKey')
      kubeConfig: reference('resource.kubeConfig', 'krakenKubeConfig')
      coreos: reference('resource.coreos', 'allNodes')
    - name: clusterNodes
      count: 3
      keypair: reference('resource.keypair', 'krakenKey')
      mounts:
        - device: sdf
          path: /var/lib/docker
          forceFormat: true
      providerConfig:
        - name: aws
          type: c4.large
          subnet: ["uwswest2a", "uwswest2b", "uwswest2c"]
          tags:
            - key: comments
              value: "Cluster plain nodes"
          storage:
            - type: ebs_block_device
              opts:
                device_name: sdf
                volume_type: gp2
                volume_size: 100
                delete_on_termination: true
                snapshot_id:
                encrypted: false
      kubeConfig: reference('resource.kubeConfig', 'krakenKubeConfig')
      coreos: reference('resource.coreos', 'allNodes')
    - name: specialNodes
      count: 2
      mounts:
        - device: sdf
          path: /var/lib/docker
          forceFormat: true
      keypair: reference('resource.keypair', 'krakenKey')
      providerConfig:
        - name: aws
          type: m3.medium
          subnet: ["uwswest2a", "uwswest2b", "uwswest2c"]
          tags:
            - key: comments
              value: "Cluster special nodes"
          storage:
            - type: ebs_block_device
              opts:
                device_name: sdf
                volume_type: gp2
                volume_size: 100
                delete_on_termination: true
                snapshot_id:
                encrypted: false
      kubeConfig: reference('resource.kubeConfig', 'krakenKubeConfig')
      coreos: reference('resource.coreos', 'allNodes')
# Deployment readiness gate: wait up to 600s for exactly 0 not-ready items.
# NOTE(review): placed at top level to preserve source order — confirm whether
# the schema expects this under `deployment` or `resource` instead.
readiness:
  type: exact
  value: 0
  wait: 600
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment