Skip to content

Instantly share code, notes, and snippets.

@aleks-mariusz
Last active July 30, 2018 08:36
Show Gist options
  • Save aleks-mariusz/01dc8b5ff2c76e628258af95c64cbdb8 to your computer and use it in GitHub Desktop.
Save aleks-mariusz/01dc8b5ff2c76e628258af95c64cbdb8 to your computer and use it in GitHub Desktop.
Kubernetes v1.11.1 — HA setup scripts for CentOS 7.x (kickstart file used: https://gist.github.com/aleks-mariusz/f01f3177a13f1e86e4088e5b6c7f278d). Still to do: set up keepalived on the 2nd/3rd nodes as well, and replace socat with nginx.
#!/bin/bash
# Bootstrap script for the FIRST control-plane node (k8s-lab1) of an HA
# kubernetes v1.11.1 cluster.
# NOTE: shebang changed from /bin/sh to /bin/bash — the script relies on
# bash-only features further down ([[ ]], pushd/popd, brace expansion).
INTERFACE=eth0                  # NIC carrying the VIP and cluster traffic
NETMASK_CIDR='/24' # 255.255.255.0
LOAD_BALANCER_IP=192.168.1.100  # keepalived virtual IP fronting the apiservers
LOAD_BALANCER_DNS=api-k8s-lab   # DNS name that resolves to the VIP
LOAD_BALANCER_PORT=6443
CP_PORT=6443                    # port each kube-apiserver listens on
CP0_HOSTNAME=k8s-lab1
CP0_IP=192.168.1.101
CP1_HOSTNAME=k8s-lab2
CP1_IP=192.168.1.102
CP2_HOSTNAME=k8s-lab3
CP2_IP=192.168.1.103
set -x   # trace every command for easier debugging
# Google-hosted yum repo providing the kubelet/kubeadm/kubectl packages.
cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y docker
# Disable SELinux immediately and persistently (both config paths).
setenforce 0
sed -i -e 's/^SELINUX=[a-z]*/SELINUX=disabled/' /etc/sysconfig/selinux /etc/selinux/config
yum install -y kubelet kubeadm kubectl
systemctl start docker && systemctl enable docker
# Make iptables see bridged traffic (required for kube-proxy rules), and
# allow binding the VIP even when this host does not currently own it.
cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
EOF
printf 'net.ipv4.ip_nonlocal_bind=1\n' > /etc/sysctl.d/nginx.conf
sysctl --system
# Containerized keepalived advertises the load-balancer VIP on this node.
# STATE=MASTER: this first node starts out owning the VIP; the other two
# masters run as BACKUP and take over on failure (same VRRP router-id 2).
# Unicast peers are the OTHER two control-plane nodes.
docker run -d --net=host --cap-add NET_ADMIN \
--name k8s-api-keepalived --restart=always \
-e KEEPALIVED_AUTOCONF=true \
-e KEEPALIVED_STATE=MASTER \
-e KEEPALIVED_INTERFACE=$INTERFACE \
-e KEEPALIVED_VIRTUAL_ROUTER_ID=2 \
-e KEEPALIVED_UNICAST_PEER_1=$CP1_IP \
-e KEEPALIVED_UNICAST_PEER_2=$CP2_IP \
-e KEEPALIVED_TRACK_INTERFACE_1=$INTERFACE \
-e KEEPALIVED_VIRTUAL_IPADDRESS_1="$LOAD_BALANCER_IP$NETMASK_CIDR dev $INTERFACE" \
arcts/keepalived
# nginx stream (TCP) proxy: load-balance VIP:6443 across all three apiservers.
mkdir -p /etc/nginx   # -p: don't fail if the directory already exists (re-runs)
cat >/etc/nginx/nginx.conf <<_EOF
worker_processes auto;
error_log stderr warn;
events {
worker_connections 1024;
}
stream {
upstream apiserver {
server $CP0_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP1_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP2_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
}
server {
listen $LOAD_BALANCER_IP:$LOAD_BALANCER_PORT;
proxy_connect_timeout 4s;
proxy_timeout 90s;
proxy_pass apiserver;
}
}
_EOF
# --net=host: nginx binds the VIP directly on the host network stack (the
# ip_nonlocal_bind sysctl set earlier permits binding a VIP this host does
# not currently hold).  The former "-p IP:PORT:PORT" flag was dropped: docker
# discards published ports when host networking is used.
docker run -d --net=host --memory=512M -v /etc/nginx:/etc/nginx \
--name k8s-api-proxy --restart=always \
nginx:alpine
# kubeadm config for the FIRST master: etcd initial-cluster lists only this
# node; the other masters are added to the etcd cluster one at a time later.
# (apiVersion v1alpha2 / MasterConfiguration is the kubeadm v1.11 schema.)
cat > kubeadm-config.yaml <<_EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.1
apiServerCertSANs:
- "$LOAD_BALANCER_DNS"
api:
controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
bindAddress: $CP0_IP
apiServerExtraArgs:
bind-address: $CP0_IP
etcd:
local:
extraArgs:
listen-client-urls: "https://127.0.0.1:2379,https://$CP0_IP:2379"
advertise-client-urls: "https://$CP0_IP:2379"
listen-peer-urls: "https://$CP0_IP:2380"
initial-advertise-peer-urls: "https://$CP0_IP:2380"
initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380"
serverCertSANs:
- $CP0_HOSTNAME
- $CP0_IP
peerCertSANs:
- $CP0_HOSTNAME
- $CP0_IP
kubeletConfiguration:
baseConfig:
clusterDomain: k8s-lab.our.local.domain
networking:
dnsDomain: k8s-lab.our.local.domain
podSubnet: "10.32.0.0/12" # default for weave cni
_EOF
# Port-6443 preflight check is skipped — presumably because the nginx VIP
# proxy started above already listens on 6443 on this host (TODO confirm).
kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors=Port-6443
# work around kubeadm bug: kubelet is handed --cni-conf-dir/--cni-bin-dir
# flags before any CNI config exists, leaving the node NotReady with:
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: W0726 15:20:27.407413 1584 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: E0726 15:20:27.408491 1584 kubelet.go:2110] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: netwo...g uninitialized
if [[ -f /var/lib/kubelet/kubeadm-flags.env ]] && \
   [[ $(grep -c -- '--cni-conf-dir=/etc/cni' /var/lib/kubelet/kubeadm-flags.env) -eq 1 && ! -d /etc/cni ]]; then
pushd /var/lib/kubelet/
# keep only the first whitespace-separated field, i.e. drop the --cni-* flags
awk '{print $1}' < kubeadm-flags.env > kubeadm-flags_fixed.env
mv kubeadm-flags{,_broken}.env
mv kubeadm-flags{_fixed,}.env
popd
fi
systemctl enable kubelet && systemctl restart kubelet
mkdir -p .kube                                   # -p: idempotent on re-runs
ln -sf /etc/kubernetes/admin.conf .kube/config   # -f: replace a stale link
# bundle the shared CA material + admin kubeconfig for the secondary masters
tar -C /etc/kubernetes -cvf kube-keys.tar admin.conf pki/{ca.{crt,key},sa.{key,pub},front-proxy-ca.{crt,key},etcd/ca.{crt,key}}
scp kube-keys.tar $CP1_HOSTNAME:
scp kube-keys.tar $CP2_HOSTNAME:
#!/bin/bash
# Join script for the SECOND control-plane node (k8s-lab2).
# NOTE: shebang changed from /bin/sh to /bin/bash — the script relies on
# bash-only features further down ([[ ]], pushd/popd, brace expansion).
INTERFACE=eth0                  # NIC carrying the VIP and cluster traffic
NETMASK_CIDR='/24' # 255.255.255.0
LOAD_BALANCER_IP=192.168.1.100  # keepalived virtual IP fronting the apiservers
LOAD_BALANCER_DNS=api-k8s-lab   # DNS name that resolves to the VIP
LOAD_BALANCER_PORT=6443
CP_PORT=6443                    # port each kube-apiserver listens on
CP0_HOSTNAME=k8s-lab1
CP0_IP=192.168.1.101
CP1_HOSTNAME=k8s-lab2
CP1_IP=192.168.1.102
CP2_HOSTNAME=k8s-lab3           # added for consistency with the other scripts
CP2_IP=192.168.1.103
set -x   # trace every command for easier debugging
# Google-hosted yum repo providing the kubelet/kubeadm/kubectl packages.
cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y docker
# Disable SELinux immediately and persistently (both config paths).
setenforce 0
sed -i -e 's/^SELINUX=[a-z]*/SELINUX=disabled/' /etc/sysconfig/selinux /etc/selinux/config
yum install -y kubelet kubeadm kubectl
systemctl start docker && systemctl enable docker
# Make iptables see bridged traffic (required for kube-proxy rules), and
# allow binding the VIP even when this host does not currently own it.
cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
EOF
printf 'net.ipv4.ip_nonlocal_bind=1\n' > /etc/sysctl.d/nginx.conf
sysctl --system
# Containerized keepalived for the API VIP.  STATE=BACKUP: this node only
# takes over the VIP when the MASTER (first node) fails; same VRRP
# router-id 2.  Unicast peers are the OTHER two control-plane nodes.
docker run -d --net=host --cap-add NET_ADMIN \
--name k8s-api-keepalived --restart=always \
-e KEEPALIVED_AUTOCONF=true \
-e KEEPALIVED_STATE=BACKUP \
-e KEEPALIVED_INTERFACE=$INTERFACE \
-e KEEPALIVED_VIRTUAL_ROUTER_ID=2 \
-e KEEPALIVED_UNICAST_PEER_0=$CP0_IP \
-e KEEPALIVED_UNICAST_PEER_2=$CP2_IP \
-e KEEPALIVED_TRACK_INTERFACE_1=$INTERFACE \
-e KEEPALIVED_VIRTUAL_IPADDRESS_1="$LOAD_BALANCER_IP$NETMASK_CIDR dev $INTERFACE" \
arcts/keepalived
# nginx stream (TCP) proxy: load-balance VIP:6443 across all three apiservers.
mkdir -p /etc/nginx   # -p: don't fail if the directory already exists (re-runs)
cat >/etc/nginx/nginx.conf <<_EOF
worker_processes auto;
error_log stderr warn;
events {
worker_connections 1024;
}
stream {
upstream apiserver {
server $CP0_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP1_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP2_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
}
server {
listen $LOAD_BALANCER_IP:$LOAD_BALANCER_PORT;
proxy_connect_timeout 4s;
proxy_timeout 90s;
proxy_pass apiserver;
}
}
_EOF
# --net=host: nginx binds the VIP directly on the host network stack (the
# ip_nonlocal_bind sysctl set earlier permits binding a VIP this host does
# not currently hold).  The former "-p IP:PORT:PORT" flag was dropped: docker
# discards published ports when host networking is used.
docker run -d --net=host --memory=512M -v /etc/nginx:/etc/nginx \
--name k8s-api-proxy --restart=always \
nginx:alpine
# kubeadm config for the SECOND master: etcd initial-cluster now lists the
# first two members, and initial-cluster-state is "existing" — this node
# joins the already-running etcd cluster instead of bootstrapping a new one.
cat > kubeadm-config.yaml <<_EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.1
apiServerCertSANs:
- "$LOAD_BALANCER_DNS"
api:
controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
bindAddress: $CP1_IP
apiServerExtraArgs:
bind-address: $CP1_IP
etcd:
local:
extraArgs:
listen-client-urls: "https://127.0.0.1:2379,https://$CP1_IP:2379"
advertise-client-urls: "https://$CP1_IP:2379"
listen-peer-urls: "https://$CP1_IP:2380"
initial-advertise-peer-urls: "https://$CP1_IP:2380"
initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380"
initial-cluster-state: existing
serverCertSANs:
- $CP1_HOSTNAME
- $CP1_IP
peerCertSANs:
- $CP1_HOSTNAME
- $CP1_IP
kubeletConfiguration:
baseConfig:
clusterDomain: k8s-lab.our.local.domain
networking:
dnsDomain: k8s-lab.our.local.domain
podSubnet: "10.32.0.0/12" # default for weave cni
_EOF
# Abort early if the first master has not copied the shared PKI/admin bundle.
if ! [[ -f kube-keys.tar ]]; then
echo 'ERROR: tarball containing pki keys and admin.conf not copied here!' >&2
exit 1   # bug fix: previously only warned and carried on without the certs
fi
tar -C /etc/kubernetes -xvf kube-keys.tar
# Generate this node's certs and kubelet config from the shared CA material.
kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
# work around kubeadm bug: kubelet is handed --cni-conf-dir/--cni-bin-dir
# flags before any CNI config exists, leaving the node NotReady with:
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: W0726 15:20:27.407413 1584 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: E0726 15:20:27.408491 1584 kubelet.go:2110] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: netwo...g uninitialized
if [[ -f /var/lib/kubelet/kubeadm-flags.env ]] && \
   [[ $(grep -c -- '--cni-conf-dir=/etc/cni' /var/lib/kubelet/kubeadm-flags.env) -eq 1 && ! -d /etc/cni ]]; then
pushd /var/lib/kubelet/
# keep only the first whitespace-separated field, i.e. drop the --cni-* flags
awk '{print $1}' < kubeadm-flags.env > kubeadm-flags_fixed.env
mv kubeadm-flags{,_broken}.env
mv kubeadm-flags{_fixed,}.env
popd
fi
systemctl enable kubelet && systemctl restart kubelet
mkdir -p .kube                                   # -p: idempotent on re-runs
ln -sf /etc/kubernetes/admin.conf .kube/config   # -f: replace a stale link
# Register this node with the running etcd cluster (via the first master's
# etcd pod), then bring up the local etcd member and control-plane components.
kubectl exec -n kube-system etcd-$CP0_HOSTNAME -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://$CP0_IP:2379 member add $CP1_HOSTNAME https://$CP1_IP:2380
kubeadm alpha phase etcd local --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml
#!/bin/bash
# Join script for the THIRD control-plane node (k8s-lab3).
# NOTE: shebang changed from /bin/sh to /bin/bash — the script relies on
# bash-only features further down ([[ ]], pushd/popd, brace expansion).
INTERFACE=eth0                  # NIC carrying the VIP and cluster traffic
NETMASK_CIDR='/24' # 255.255.255.0
LOAD_BALANCER_IP=192.168.1.100  # keepalived virtual IP fronting the apiservers
LOAD_BALANCER_DNS=api-k8s-lab   # DNS name that resolves to the VIP
LOAD_BALANCER_PORT=6443
CP_PORT=6443                    # port each kube-apiserver listens on
CP0_HOSTNAME=k8s-lab1
CP0_IP=192.168.1.101
CP1_HOSTNAME=k8s-lab2
CP1_IP=192.168.1.102
CP2_HOSTNAME=k8s-lab3
CP2_IP=192.168.1.103
set -x   # trace every command for easier debugging
# Google-hosted yum repo providing the kubelet/kubeadm/kubectl packages.
cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y docker
# Disable SELinux immediately and persistently (both config paths).
setenforce 0
sed -i -e 's/^SELINUX=[a-z]*/SELINUX=disabled/' /etc/sysconfig/selinux /etc/selinux/config
yum install -y kubelet kubeadm kubectl
systemctl start docker && systemctl enable docker
# Make iptables see bridged traffic (required for kube-proxy rules), and
# allow binding the VIP even when this host does not currently own it.
cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
EOF
printf 'net.ipv4.ip_nonlocal_bind=1\n' > /etc/sysctl.d/nginx.conf
sysctl --system
# Containerized keepalived for the API VIP.  STATE=BACKUP: this node only
# takes over the VIP when the MASTER (first node) fails; same VRRP
# router-id 2.  Unicast peers are the OTHER two control-plane nodes.
docker run -d --net=host --cap-add NET_ADMIN \
--name k8s-api-keepalived --restart=always \
-e KEEPALIVED_AUTOCONF=true \
-e KEEPALIVED_STATE=BACKUP \
-e KEEPALIVED_INTERFACE=$INTERFACE \
-e KEEPALIVED_VIRTUAL_ROUTER_ID=2 \
-e KEEPALIVED_UNICAST_PEER_0=$CP0_IP \
-e KEEPALIVED_UNICAST_PEER_1=$CP1_IP \
-e KEEPALIVED_TRACK_INTERFACE_1=$INTERFACE \
-e KEEPALIVED_VIRTUAL_IPADDRESS_1="$LOAD_BALANCER_IP$NETMASK_CIDR dev $INTERFACE" \
arcts/keepalived
# nginx stream (TCP) proxy: load-balance VIP:6443 across all three apiservers.
mkdir -p /etc/nginx   # -p: don't fail if the directory already exists (re-runs)
cat >/etc/nginx/nginx.conf <<_EOF
worker_processes auto;
error_log stderr warn;
events {
worker_connections 1024;
}
stream {
upstream apiserver {
server $CP0_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP1_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
server $CP2_IP:$CP_PORT weight=5 max_fails=3 fail_timeout=30s;
}
server {
listen $LOAD_BALANCER_IP:$LOAD_BALANCER_PORT;
proxy_connect_timeout 4s;
proxy_timeout 90s;
proxy_pass apiserver;
}
}
_EOF
# --net=host: nginx binds the VIP directly on the host network stack (the
# ip_nonlocal_bind sysctl set earlier permits binding a VIP this host does
# not currently hold).  The former "-p IP:PORT:PORT" flag was dropped: docker
# discards published ports when host networking is used.
docker run -d --net=host --memory=512M -v /etc/nginx:/etc/nginx \
--name k8s-api-proxy --restart=always \
nginx:alpine
# kubeadm config for the THIRD master: etcd initial-cluster now lists all
# three members, and initial-cluster-state is "existing" — this node joins
# the already-running etcd cluster instead of bootstrapping a new one.
cat > kubeadm-config.yaml <<_EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.1
apiServerCertSANs:
- "$LOAD_BALANCER_DNS"
api:
controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
bindAddress: $CP2_IP
apiServerExtraArgs:
bind-address: $CP2_IP
etcd:
local:
extraArgs:
listen-client-urls: "https://127.0.0.1:2379,https://$CP2_IP:2379"
advertise-client-urls: "https://$CP2_IP:2379"
listen-peer-urls: "https://$CP2_IP:2380"
initial-advertise-peer-urls: "https://$CP2_IP:2380"
initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380,$CP2_HOSTNAME=https://$CP2_IP:2380"
initial-cluster-state: existing
serverCertSANs:
- $CP2_HOSTNAME
- $CP2_IP
peerCertSANs:
- $CP2_HOSTNAME
- $CP2_IP
kubeletConfiguration:
baseConfig:
clusterDomain: k8s-lab.our.local.domain
networking:
dnsDomain: k8s-lab.our.local.domain
podSubnet: "10.32.0.0/12" # default for weave cni
_EOF
# Abort early if the first master has not copied the shared PKI/admin bundle.
if ! [[ -f kube-keys.tar ]]; then
echo 'ERROR: tarball containing pki keys and admin.conf not copied here!' >&2
exit 1   # bug fix: previously only warned and carried on without the certs
fi
tar -C /etc/kubernetes -xvf kube-keys.tar
# Generate this node's certs and kubelet config from the shared CA material.
kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
# work around kubeadm bug: kubelet is handed --cni-conf-dir/--cni-bin-dir
# flags before any CNI config exists, leaving the node NotReady with:
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: W0726 15:20:27.407413 1584 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
# Jul 26 15:20:27 k8s-lab1 kubelet[1584]: E0726 15:20:27.408491 1584 kubelet.go:2110] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: netwo...g uninitialized
if [[ -f /var/lib/kubelet/kubeadm-flags.env ]] && \
   [[ $(grep -c -- '--cni-conf-dir=/etc/cni' /var/lib/kubelet/kubeadm-flags.env) -eq 1 && ! -d /etc/cni ]]; then
pushd /var/lib/kubelet/
# keep only the first whitespace-separated field, i.e. drop the --cni-* flags
awk '{print $1}' < kubeadm-flags.env > kubeadm-flags_fixed.env
mv kubeadm-flags{,_broken}.env
mv kubeadm-flags{_fixed,}.env
popd
fi
systemctl enable kubelet && systemctl restart kubelet
mkdir -p .kube                                   # -p: idempotent on re-runs
ln -sf /etc/kubernetes/admin.conf .kube/config   # -f: replace a stale link
# Register this node with the running etcd cluster (via the first master's
# etcd pod), then bring up the local etcd member and control-plane components.
kubectl exec -n kube-system etcd-$CP0_HOSTNAME -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://$CP0_IP:2379 member add $CP2_HOSTNAME https://$CP2_IP:2380
kubeadm alpha phase etcd local --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml
# Allow regular workloads to schedule on the masters (removes NoSchedule taint).
kubectl taint nodes --all node-role.kubernetes.io/master-
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment