Kubernetes - Multi Vagrant Setup

Kubernetes Installation on Vagrant

Version Matrix

kubectl: 1.19.0
kubelet: 1.19.0
kubeadm: 1.19.0
containerd: 1.4.x
docker: 19.03
kubernetes: 1.19.0

# Docker 19 is compatible with kubeadm 1.19 and below.
# From Docker 20 onwards dockershim.sock is deprecated and the
# CRI socket should be updated to the containerd.sock file path.

Add kernel modules

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
EOF

sudo sysctl --system
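
A quick sanity check (optional) to confirm the modules loaded and the sysctls applied:

lsmod | grep -e overlay -e br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward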

Add Docker repo

sudo yum install -y epel-release
sudo yum install -y yum-utils device-mapper-persistent-data lvm2 iproute deltarpm

sudo yum remove -y '*docker*' '*container*' cri-tools

sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
sudo yum clean all

VERSION="19.03.15"
sudo yum install -y docker-ce-$VERSION docker-ce-cli-$VERSION containerd.io
sudo systemctl enable docker
sudo mkdir -p /etc/docker
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo systemctl restart docker
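
Docker should now report the systemd cgroup driver; worth a quick check before moving on:

sudo docker info | grep -i 'cgroup driver'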

Install and configure kubeadm on all nodes

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

# list the available versions first if needed:
# yum list --showduplicates kubeadm --disableexcludes=kubernetes
VERSION="1.19.0"
sudo yum install -y kubeadm-$VERSION kubectl-$VERSION kubelet-$VERSION kubernetes-cni --disableexcludes=kubernetes
sudo systemctl enable kubelet
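
To verify the expected versions landed:

kubeadm version -o short
kubelet --version
kubectl version --client --short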

Only on control plane nodes

# kubeadm-config.yaml
cat <<EOF | sudo tee kubeadm-config.yaml
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: "0"
  usages:
  - signing
  - authentication
localAPIEndpoint:
  advertiseAddress: 192.168.56.51
  bindPort: 6443
nodeRegistration:
#  criSocket: /run/containerd/containerd.sock # left commented out, so kubeadm defaults to dockershim
  name: cp1.example.com
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
networking:
  dnsDomain: example.com
  podSubnet: 10.244.0.0/22
  serviceSubnet: 10.96.0.0/22
scheduler: {}
EOF
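
Optionally, the control plane images can be pre-pulled so the init step itself runs faster:

sudo kubeadm config images pull --config kubeadm-config.yaml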

# Execute the command only on the control plane node
kubeadm init --config kubeadm-config.yaml

Deploy Network Add-On, only on the control plane node

# For now I will deploy flannel
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Update kube-flannel.yml so its Network value matches the podSubnet above (10.244.0.0/22).
# Setup your kubeconfig file and deploy once on the CP node before joining the workers
kubectl apply -f kube-flannel.yml
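
A minimal way to make that edit, assuming the manifest still ships the default 10.244.0.0/16 Network value in its net-conf.json:

sed -i 's|10.244.0.0/16|10.244.0.0/22|' kube-flannel.yml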

Join worker nodes

To start using your cluster, run the following as a regular user on the control plane (this is printed by kubeadm init):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

# On each worker node:
kubeadm join 192.168.56.51:6443 --token "abcdef.0123456789abcdef" \
  --discovery-token-ca-cert-hash sha256:xxxxx
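
If the token has expired or the CA cert hash is not at hand, a fresh join command can be printed on the control plane:

kubeadm token create --print-join-command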

Kubeadm Destroy

sudo kubeadm reset
sudo rm -rf /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni /etc/cni/net.d
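
kubeadm reset does not flush iptables rules; if a fully clean slate is needed, its own output suggests:

sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X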

How to change certs in Kubernetes using kubeadm

# Generate kubeadm.yaml
kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' > kubeadm.yaml
# Add the desired SANs under apiServer in kubeadm.yaml, for example:

apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
  certSANs:
  - kubernetes.example.com
  - "x.x.x.x"
  - cp1.example.com
  - "192.168.56.51"

sudo mv /etc/kubernetes/pki/apiserver.{crt,key} ~
sudo kubeadm init phase certs apiserver --config kubeadm.yaml

# The final step is restarting the API server to pick up the new certificate.
# The easiest way to do this is to kill the API server container using docker:
Run `docker ps | grep kube-apiserver | grep -v pause` to get the container ID for the container running the Kubernetes API server. (The container ID will be the very first field in the output.)
Run `docker kill <containerID>` to kill the container.

# If your nodes are running containerd as the container runtime, the commands are a bit different:
Run `crictl pods | grep kube-apiserver | cut -d' ' -f1` to get the Pod ID for the Kubernetes API server Pod.
Run `crictl stopp <pod-id>` to stop the Pod.
Run `crictl rmp <pod-id>` to remove the Pod.
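
The same containerd restart as a one-shot sketch (assumes a single kube-apiserver Pod on the node):

POD_ID=$(crictl pods --name kube-apiserver -q)
crictl stopp "$POD_ID" && crictl rmp "$POD_ID"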

# verify the change
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text
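
Or to check just the SANs:

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'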

# inspect the stored cluster configuration
kubectl -n kube-system get configmap kubeadm-config -o yaml
kubectl -n kube-system describe configmap kubeadm-config
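
To persist the edited kubeadm.yaml back into the kubeadm-config ConfigMap (a sketch; confirm against your kubeadm version):

sudo kubeadm init phase upload-config kubeadm --config kubeadm.yaml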

Upgrade steps

# upgrade the kubeadm to the desired version
# upgrade from 1.18 to 1.19.1
# On MASTER NODE
VERSION="1.19.1"
yum install -y kubeadm-$VERSION kubectl-$VERSION kubelet-$VERSION --disableexcludes=kubernetes
kubeadm upgrade plan
kubeadm upgrade apply v1.19.1
systemctl daemon-reload ; sleep 5
systemctl restart kubelet

# Worker node upgrades, perform the following from the master node
# ("k" below is an alias for kubectl)
k get nodes
NAME                      STATUS   ROLES    AGE   VERSION
cp1.example.com           Ready    master   41d   v1.19.1
k8s-worker2.example.com   Ready    <none>   41d   v1.18.20
k8s-worker3.example.com   Ready    <none>   40d   v1.18.20

kubectl drain k8s-worker2.example.com --ignore-daemonsets
k get nodes k8s-worker2.example.com
NAME                      STATUS                     ROLES    AGE   VERSION
k8s-worker2.example.com   Ready,SchedulingDisabled   <none>   41d   v1.18.20

# Login to the worker node and perform the following actions
VERSION="1.19.1"
yum install -y kubeadm-$VERSION kubectl-$VERSION kubelet-$VERSION --disableexcludes=kubernetes
kubeadm upgrade node
systemctl daemon-reload
systemctl restart kubelet

k get nodes k8s-worker2.example.com
NAME                      STATUS                     ROLES    AGE   VERSION
k8s-worker2.example.com   Ready,SchedulingDisabled   <none>   41d   v1.19.1

k uncordon k8s-worker2.example.com

k get nodes k8s-worker2.example.com
NAME                      STATUS   ROLES    AGE   VERSION
k8s-worker2.example.com   Ready    <none>   41d   v1.19.1