Skip to content

Instantly share code, notes, and snippets.

@rawc0der
Last active April 9, 2016 16:22
Show Gist options
  • Save rawc0der/90bd13c6a2736d7cf8d3bb7fb2a252cf to your computer and use it in GitHub Desktop.
CoreOS + Kubernetes 3 machine cluster deployment
#cloud-config
# Machine 1 of 3: etcd master + Kubernetes control plane (192.168.1.10).
# NOTE: the "#cloud-config" line above must remain the very first line of the file.
hostname: "coreos.local.1"
ssh_authorized_keys:
  - ...
coreos:
  etcd2:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
    # specify the initial size of your cluster with ?size=X
    # discovery: https://discovery.etcd.io/64cdc949e85e49cbabe1f019b6c95408
    # multi-region and multi-cloud deployments need to use $public_ipv4
    advertise-client-urls: http://192.168.1.10:2379,http://192.168.1.10:4001
    initial-advertise-peer-urls: http://192.168.1.10:2380
    # listen on both the official ports and the legacy ports
    # legacy ports can be omitted if your application doesn't depend on them
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://192.168.1.10:2380
    name: master1
    initial-cluster: master1=http://192.168.1.10:2380,slave1=http://192.168.1.11:2380,slave2=http://192.168.1.12:2380
    initial-cluster-state: new
    initial-cluster-token: syn-etcd-cluster1
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
    # static network config for the VirtualBox-style NIC enp0s3
    - name: 00-enp0s3.network
      runtime: true
      content: |
        [Match]
        Name=enp0s3
        [Network]
        Address=192.168.1.10/24
        Gateway=192.168.1.1
        DNS=8.8.8.8
        DNS=8.8.4.4
    - name: flanneld.service
      drop-ins:
        # seed the flannel overlay network config in etcd before flanneld starts
        - name: 50-network-config.conf
          content: |
            [Unit]
            Requires=etcd2.service
            After=etcd2.service
            [Service]
            ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "10.2.0.0/16" }'
        - name: 40-ExecStartPre-symlink.conf
          content: |
            [Service]
            ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
      command: start
    # docker must wait for the flannel overlay to be up
    - name: docker.service
      drop-ins:
        - name: 40-flannel.conf
          content: |
            [Unit]
            Requires=flanneld.service
            After=flanneld.service
write_files:
  # Control-plane pods live in /srv/... and are copied into the kubelet's
  # /etc/kubernetes/manifests dir by podmaster when this node wins the election.
  - path: /srv/kubernetes/manifests/kube-scheduler.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-scheduler
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-scheduler
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - scheduler
              - --master=http://127.0.0.1:8080
            livenessProbe:
              httpGet:
                host: 127.0.0.1
                path: /healthz
                port: 10251
              initialDelaySeconds: 15
              timeoutSeconds: 1
  - path: /srv/kubernetes/manifests/kube-controller-manager.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-controller-manager
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-controller-manager
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - controller-manager
              - --master=http://127.0.0.1:8080
              - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
              - --root-ca-file=/etc/kubernetes/ssl/ca.pem
            livenessProbe:
              httpGet:
                host: 127.0.0.1
                path: /healthz
                port: 10252
              initialDelaySeconds: 15
              timeoutSeconds: 1
            volumeMounts:
              - mountPath: /etc/kubernetes/ssl
                name: ssl-certs-kubernetes
                readOnly: true
              - mountPath: /etc/ssl/certs
                name: ssl-certs-host
                readOnly: true
        volumes:
          - hostPath:
              path: /etc/kubernetes/ssl
            name: ssl-certs-kubernetes
          - hostPath:
              path: /usr/share/ca-certificates
            name: ssl-certs-host
  # podmaster runs on every master candidate and elects which node copies the
  # scheduler/controller-manager manifests into the live kubelet manifest dir
  - path: /etc/kubernetes/manifests/kube-podmaster.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-podmaster
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: scheduler-elector
            image: gcr.io/google_containers/podmaster:1.1
            command:
              - /podmaster
              - --etcd-servers=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
              - --key=scheduler
              - --whoami=192.168.1.10
              - --source-file=/src/manifests/kube-scheduler.yaml
              - --dest-file=/dst/manifests/kube-scheduler.yaml
            volumeMounts:
              - mountPath: /src/manifests
                name: manifest-src
                readOnly: true
              - mountPath: /dst/manifests
                name: manifest-dst
          - name: controller-manager-elector
            image: gcr.io/google_containers/podmaster:1.1
            command:
              - /podmaster
              - --etcd-servers=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
              - --key=controller
              - --whoami=192.168.1.10
              - --source-file=/src/manifests/kube-controller-manager.yaml
              - --dest-file=/dst/manifests/kube-controller-manager.yaml
            terminationMessagePath: /dev/termination-log
            volumeMounts:
              - mountPath: /src/manifests
                name: manifest-src
                readOnly: true
              - mountPath: /dst/manifests
                name: manifest-dst
        volumes:
          - hostPath:
              path: /srv/kubernetes/manifests
            name: manifest-src
          - hostPath:
              path: /etc/kubernetes/manifests
            name: manifest-dst
  - path: /etc/kubernetes/manifests/kube-proxy.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-proxy
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-proxy
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - proxy
              - --master=http://127.0.0.1:8080
              - --proxy-mode=iptables
            securityContext:
              privileged: true
            volumeMounts:
              - mountPath: /etc/ssl/certs
                name: ssl-certs-host
                readOnly: true
        volumes:
          - hostPath:
              path: /usr/share/ca-certificates
            name: ssl-certs-host
  - path: /etc/kubernetes/manifests/kube-apiserver.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-apiserver
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-apiserver
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - apiserver
              - --bind-address=0.0.0.0
              - --etcd-servers=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
              - --allow-privileged=true
              - --service-cluster-ip-range=10.3.0.0/24
              - --secure-port=443
              - --advertise-address=192.168.1.10
              - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
              - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
              - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
              - --client-ca-file=/etc/kubernetes/ssl/ca.pem
              - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
            ports:
              - containerPort: 443
                hostPort: 443
                name: https
              - containerPort: 8080
                hostPort: 8080
                name: local
            volumeMounts:
              - mountPath: /etc/kubernetes/ssl
                name: ssl-certs-kubernetes
                readOnly: true
              - mountPath: /etc/ssl/certs
                name: ssl-certs-host
                readOnly: true
        volumes:
          - hostPath:
              path: /etc/kubernetes/ssl
            name: ssl-certs-kubernetes
          - hostPath:
              path: /usr/share/ca-certificates
            name: ssl-certs-host
  # master kubelet: not registered as a schedulable node (--register-node=false)
  - path: /etc/systemd/system/kubelet.service
    permissions: "0644"
    owner: "root"
    content: |
      [Service]
      ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
      Environment=KUBELET_VERSION=v1.1.8_coreos.0
      ExecStart=/usr/lib/coreos/kubelet-wrapper \
        --api_servers=http://127.0.0.1:8080 \
        --register-node=false \
        --allow-privileged=true \
        --config=/etc/kubernetes/manifests \
        --hostname-override=192.168.1.10 \
        --cluster-dns=10.3.0.10 \
        --cluster-domain=cluster.local
      Restart=always
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
  - path: /etc/hosts
    permissions: "0644"
    owner: "root"
    content: |
      127.0.0.1 localhost
      192.168.1.10 kube-worker-1
      192.168.1.11 kube-worker-2
      192.168.1.12 kube-worker-3
  - path: /etc/flannel/options.env
    permissions: "0644"
    owner: "root"
    content: |
      FLANNELD_IFACE=192.168.1.10
      FLANNELD_ETCD_ENDPOINTS=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
#cloud-config
# Machine 2 of 3: etcd member + Kubernetes worker (192.168.1.11).
# NOTE: the "#cloud-config" line above must remain the very first line of the file.
hostname: "coreos.local.2"
ssh_authorized_keys:
  - ...
coreos:
  etcd2:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
    # specify the initial size of your cluster with ?size=X
    # discovery: https://discovery.etcd.io/64cdc949e85e49cbabe1f019b6c95408
    # multi-region and multi-cloud deployments need to use $public_ipv4
    advertise-client-urls: http://192.168.1.11:2379,http://192.168.1.11:4001
    initial-advertise-peer-urls: http://192.168.1.11:2380
    # listen on both the official ports and the legacy ports
    # legacy ports can be omitted if your application doesn't depend on them
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://192.168.1.11:2380
    name: slave1
    initial-cluster: master1=http://192.168.1.10:2380,slave1=http://192.168.1.11:2380,slave2=http://192.168.1.12:2380
    initial-cluster-state: new
    initial-cluster-token: syn-etcd-cluster1
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
    # static network config for the VirtualBox-style NIC enp0s3
    - name: 00-enp0s3.network
      runtime: true
      content: |
        [Match]
        Name=enp0s3
        [Network]
        Address=192.168.1.11/24
        Gateway=192.168.1.1
        DNS=8.8.8.8
        DNS=8.8.4.4
    - name: flanneld.service
      drop-ins:
        - name: 40-ExecStartPre-symlink.conf
          content: |
            [Service]
            ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
    # docker must wait for the flannel overlay to be up
    - name: docker.service
      drop-ins:
        - name: 40-flannel.conf
          content: |
            [Unit]
            Requires=flanneld.service
            After=flanneld.service
write_files:
  # kubeconfig used by kubelet/kube-proxy to reach the master over TLS
  - path: /etc/kubernetes/worker-kubeconfig.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Config
      clusters:
        - name: local
          cluster:
            certificate-authority: /etc/kubernetes/ssl/ca.pem
      users:
        - name: kubelet
          user:
            client-certificate: /etc/kubernetes/ssl/worker.pem
            client-key: /etc/kubernetes/ssl/worker-key.pem
      contexts:
        - context:
            cluster: local
            user: kubelet
          name: kubelet-context
      current-context: kubelet-context
  - path: /etc/kubernetes/manifests/kube-proxy.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-proxy
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-proxy
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - proxy
              - --master=https://192.168.1.10
              - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
              - --proxy-mode=iptables
            securityContext:
              privileged: true
            volumeMounts:
              - mountPath: /etc/ssl/certs
                name: "ssl-certs"
              - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
                name: "kubeconfig"
                readOnly: true
              - mountPath: /etc/kubernetes/ssl
                name: "etc-kube-ssl"
                readOnly: true
        volumes:
          - name: "ssl-certs"
            hostPath:
              path: "/usr/share/ca-certificates"
          - name: "kubeconfig"
            hostPath:
              path: "/etc/kubernetes/worker-kubeconfig.yaml"
          - name: "etc-kube-ssl"
            hostPath:
              path: "/etc/kubernetes/ssl"
  # worker kubelet: registers with the master over TLS (--register-node=true)
  - path: /etc/systemd/system/kubelet.service
    permissions: "0644"
    owner: "root"
    content: |
      [Service]
      ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
      Environment=KUBELET_VERSION=v1.1.8_coreos.0
      ExecStart=/usr/lib/coreos/kubelet-wrapper \
        --api_servers=https://192.168.1.10 \
        --register-node=true \
        --allow-privileged=true \
        --config=/etc/kubernetes/manifests \
        --hostname-override=192.168.1.11 \
        --cluster-dns=10.3.0.10 \
        --cluster-domain=cluster.local \
        --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
        --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
        --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem
      Restart=always
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
  - path: /etc/hosts
    content: |
      127.0.0.1 localhost
      192.168.1.10 kube-worker-1
      192.168.1.11 kube-worker-2
      192.168.1.12 kube-worker-3
  - path: /etc/flannel/options.env
    permissions: "0644"
    owner: "root"
    content: |
      FLANNELD_IFACE=192.168.1.11
      FLANNELD_ETCD_ENDPOINTS=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
#cloud-config
# Machine 3 of 3: etcd member + Kubernetes worker (192.168.1.12).
# NOTE: the "#cloud-config" line above must remain the very first line of the file.
hostname: "coreos.local.3"
ssh_authorized_keys:
  - ...
coreos:
  etcd2:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
    # specify the initial size of your cluster with ?size=X
    # discovery: https://discovery.etcd.io/64cdc949e85e49cbabe1f019b6c95408
    # multi-region and multi-cloud deployments need to use $public_ipv4
    advertise-client-urls: http://192.168.1.12:2379,http://192.168.1.12:4001
    initial-advertise-peer-urls: http://192.168.1.12:2380
    # listen on both the official ports and the legacy ports
    # legacy ports can be omitted if your application doesn't depend on them
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://192.168.1.12:2380
    name: slave2
    initial-cluster: master1=http://192.168.1.10:2380,slave1=http://192.168.1.11:2380,slave2=http://192.168.1.12:2380
    initial-cluster-state: new
    initial-cluster-token: syn-etcd-cluster1
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
    # static network config for the VirtualBox-style NIC enp0s3
    - name: 00-enp0s3.network
      runtime: true
      content: |
        [Match]
        Name=enp0s3
        [Network]
        Address=192.168.1.12/24
        Gateway=192.168.1.1
        DNS=8.8.8.8
        DNS=8.8.4.4
    - name: flanneld.service
      drop-ins:
        - name: 40-ExecStartPre-symlink.conf
          content: |
            [Service]
            ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
    # docker must wait for the flannel overlay to be up
    - name: docker.service
      drop-ins:
        - name: 40-flannel.conf
          content: |
            [Unit]
            Requires=flanneld.service
            After=flanneld.service
write_files:
  # kubeconfig used by kubelet/kube-proxy to reach the master over TLS
  - path: /etc/kubernetes/worker-kubeconfig.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Config
      clusters:
        - name: local
          cluster:
            certificate-authority: /etc/kubernetes/ssl/ca.pem
      users:
        - name: kubelet
          user:
            client-certificate: /etc/kubernetes/ssl/worker.pem
            client-key: /etc/kubernetes/ssl/worker-key.pem
      contexts:
        - context:
            cluster: local
            user: kubelet
          name: kubelet-context
      current-context: kubelet-context
  - path: /etc/kubernetes/manifests/kube-proxy.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-proxy
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-proxy
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - proxy
              - --master=https://192.168.1.10
              - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
              - --proxy-mode=iptables
            securityContext:
              privileged: true
            volumeMounts:
              - mountPath: /etc/ssl/certs
                name: "ssl-certs"
              - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
                name: "kubeconfig"
                readOnly: true
              - mountPath: /etc/kubernetes/ssl
                name: "etc-kube-ssl"
                readOnly: true
        volumes:
          - name: "ssl-certs"
            hostPath:
              path: "/usr/share/ca-certificates"
          - name: "kubeconfig"
            hostPath:
              path: "/etc/kubernetes/worker-kubeconfig.yaml"
          - name: "etc-kube-ssl"
            hostPath:
              path: "/etc/kubernetes/ssl"
  # worker kubelet: registers with the master over TLS (--register-node=true)
  - path: /etc/systemd/system/kubelet.service
    permissions: "0644"
    owner: "root"
    content: |
      [Service]
      ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
      Environment=KUBELET_VERSION=v1.1.8_coreos.0
      ExecStart=/usr/lib/coreos/kubelet-wrapper \
        --api_servers=https://192.168.1.10 \
        --register-node=true \
        --allow-privileged=true \
        --config=/etc/kubernetes/manifests \
        --hostname-override=192.168.1.12 \
        --cluster-dns=10.3.0.10 \
        --cluster-domain=cluster.local \
        --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
        --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
        --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem
      Restart=always
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
  - path: /etc/hosts
    content: |
      127.0.0.1 localhost
      192.168.1.10 kube-worker-1
      192.168.1.11 kube-worker-2
      192.168.1.12 kube-worker-3
  - path: /etc/flannel/options.env
    permissions: "0644"
    owner: "root"
    content: |
      FLANNELD_IFACE=192.168.1.12
      FLANNELD_ETCD_ENDPOINTS=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment