Skip to content

Instantly share code, notes, and snippets.

@rizerzero
Forked from kacole2/1a-steps.md
Created December 2, 2019 16:41
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save rizerzero/5ea1b55335b1c54364104a42c5ce75ab to your computer and use it in GitHub Desktop.
Save rizerzero/5ea1b55335b1c54364104a42c5ce75ab to your computer and use it in GitHub Desktop.
Kubernetes 1.14.1 Installation using kubeadm on vSphere with CentOS7

Steps to Install Kubernetes on CentOS7 with Kubeadm and vSphere

  1. On the master node: Edit the vsphere.conf file within the kubeadm-master.sh to match your environment. Copy kubeadm-master.sh to the master node:
sudo chmod u+x kubeadm-master.sh
sudo ./kubeadm-master.sh
  2. On each worker node copy kubeadm-worker.sh:
sudo chmod u+x kubeadm-worker.sh
sudo ./kubeadm-worker.sh
  3. On the master node, install flannel:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
  4. On the master node, check to make sure the pods are all in the status Running:
$ kubectl get pods --all-namespaces
  5. On the master node, export the master node config used to point the workers being joined to the master:
kubectl -n kube-public get configmap cluster-info -o jsonpath='{.data.kubeconfig}' > discovery.yaml
  6. On the master node, transfer the discovery.yaml to every worker that will need it:
scp discovery.yaml kcoleman@10.IP.ADD.RESS:~/discovery.yaml
  7. On the worker node, make sure the yaml was successfully transferred:
cat discovery.yaml
  8. On the worker node, join it to the cluster with kubeadm:
sudo kubeadm join --config /etc/kubernetes/kubeadminitworker.yaml
  9. On the master node, verify the nodes were added to the cluster and the Provider for vSphere is correct:
$ kubectl get nodes -o wide
$ kubectl describe nodes | grep "ProviderID"
  10. On the master node, create the StorageClass and a PersistentVolumeClaim to verify storage connectivity:
kubectl apply -f sc.yaml
kubectl apply -f pvc.yaml
#!/bin/bash
# kubeadm-master.sh
# Bootstraps a Kubernetes 1.14.1 control-plane node on CentOS 7 with kubeadm,
# enabling the in-tree vSphere cloud provider and a pod CIDR compatible with
# the flannel overlay. Run as root (via sudo). EDIT the vsphere.conf heredoc
# below to match your vCenter environment before running.
set -euo pipefail

# --- Housekeeping: swap off, firewall off, SELinux permissive ---------------
# kubelet refuses to start with swap enabled; firewalld/SELinux are relaxed
# for lab simplicity (harden these for production use).
yum update -y
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab   # keep swap disabled after reboot
systemctl disable firewalld || true             # tolerate absent/already-disabled unit
systemctl stop firewalld || true
setenforce 0 || true                            # non-zero if already permissive
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
echo "Housekeeping done"

# --- Install Docker (pinned to 18.06.2, a version validated for k8s 1.14) ---
yum install -y yum-utils device-mapper-persistent-data lvm2 yum-plugin-versionlock
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.2.ce-3.el7 docker-ce-cli-18.06.2.ce-3.el7 containerd.io
mkdir -p /etc/docker
# systemd cgroup driver + bounded json-file logs + overlay2 storage, per the
# Kubernetes container-runtime installation guidance.
cat > /etc/docker/daemon.json <<'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl enable docker
groupadd docker || true                         # group may already exist
MAINUSER=$(logname)                             # user who invoked sudo; NOTE(review): needs a controlling tty
usermod -aG docker "$MAINUSER"
systemctl start docker
# Pin the Docker packages so a later 'yum update' cannot pull an unvalidated version.
yum versionlock docker-ce docker-ce-cli containerd.io
echo "Docker Installation done"

# --- Install kubelet/kubeadm/kubectl, pinned to 1.14.1 ----------------------
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
yum install -y kubelet-1.14.1-0 kubeadm-1.14.1-0 kubectl-1.14.1-0 --disableexcludes=kubernetes
echo "Kube Stuff done"
systemctl enable --now kubelet                  # --now both enables and starts the unit
yum versionlock kubelet kubeadm kubectl
echo "Kubelet started done"

# --- Kernel networking settings required by kube-proxy / CNI ----------------
cat <<'EOF' > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
echo "Network Stuff done"

# --- vSphere cloud provider configuration -----------------------------------
# EDIT ME: vCenter credentials, address, datacenter, datastore, resource pool.
mkdir -p /etc/kubernetes
tee /etc/kubernetes/vsphere.conf >/dev/null <<'EOF'
[Global]
user = "administrator@vsphere.local"
password = "VMware1!"
port = "443"
insecure-flag = "1"

[VirtualCenter "10.IP.ADD.RESS"]
datacenters = "Datacenter-A"

[Workspace]
server = "10.IP.ADD.RESS"
datacenter = "Datacenter-A"
default-datastore = "vsanDatastore"
resourcepool-path = "Cluster-A/Resources"
folder = "kubernetes"

[Disk]
scsicontrollertype = pvscsi

[Network]
public-network = "K8s"
EOF

# --- kubeadm init configuration ---------------------------------------------
# Activates the vSphere cloud provider on kubelet, apiserver and
# controller-manager, and sets the 10.244.0.0/16 pod CIDR flannel expects.
# NOTE(review): ttl 0s makes the bootstrap token non-expiring — fine for a
# lab, revoke it for production.
tee /etc/kubernetes/kubeadminitmaster.yaml >/dev/null <<'EOF'
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: y7yaev.9dvwxx6ny4ef8vlq
  ttl: 0s
  usages:
  - signing
  - authentication
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.1
apiServer:
  extraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
  extraVolumes:
  - name: cloud
    hostPath: "/etc/kubernetes/vsphere.conf"
    mountPath: "/etc/kubernetes/vsphere.conf"
controllerManager:
  extraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
  extraVolumes:
  - name: cloud
    hostPath: "/etc/kubernetes/vsphere.conf"
    mountPath: "/etc/kubernetes/vsphere.conf"
networking:
  podSubnet: "10.244.0.0/16"
EOF

# Restart the kubelet daemon to reload the configuration.
systemctl daemon-reload
systemctl restart kubelet

# Pre-pull the control-plane images, then initialise the cluster.
kubeadm config images pull
kubeadm init --config /etc/kubernetes/kubeadminitmaster.yaml

# --- Give the invoking user a working kubectl config ------------------------
mkdir -p "/home/${MAINUSER}/.kube"
cp -i /etc/kubernetes/admin.conf "/home/${MAINUSER}/.kube/config"
chown -R "${MAINUSER}:${MAINUSER}" "/home/${MAINUSER}/.kube"
#!/bin/bash
# kubeadm-worker.sh
# Prepares a Kubernetes 1.14.1 worker node on CentOS 7: installs Docker and
# the kube tools, and writes the kubeadm JoinConfiguration used by
# 'kubeadm join --config /etc/kubernetes/kubeadminitworker.yaml'.
# Run as root (via sudo). Requires discovery.yaml copied from the master.
set -euo pipefail

# --- Housekeeping: swap off, firewall off, SELinux permissive ---------------
# kubelet refuses to start with swap enabled; firewalld/SELinux are relaxed
# for lab simplicity (harden these for production use).
yum update -y
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab   # keep swap disabled after reboot
systemctl disable firewalld || true             # tolerate absent/already-disabled unit
systemctl stop firewalld || true
setenforce 0 || true                            # non-zero if already permissive
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
echo "Housekeeping done"

# --- Install Docker (pinned to 18.06.2, a version validated for k8s 1.14) ---
yum install -y yum-utils device-mapper-persistent-data lvm2 yum-plugin-versionlock
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.2.ce-3.el7 docker-ce-cli-18.06.2.ce-3.el7 containerd.io
mkdir -p /etc/docker
# systemd cgroup driver + bounded json-file logs + overlay2 storage, per the
# Kubernetes container-runtime installation guidance.
cat > /etc/docker/daemon.json <<'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl enable docker
groupadd docker || true                         # group may already exist
MAINUSER=$(logname)                             # user who invoked sudo; NOTE(review): needs a controlling tty
usermod -aG docker "$MAINUSER"
systemctl start docker
# Pin the Docker packages so a later 'yum update' cannot pull an unvalidated version.
yum versionlock docker-ce docker-ce-cli containerd.io
echo "Docker Installation done"

# --- Install kubelet/kubeadm/kubectl, pinned to 1.14.1 ----------------------
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
yum install -y kubelet-1.14.1-0 kubeadm-1.14.1-0 kubectl-1.14.1-0 --disableexcludes=kubernetes
echo "Kube Stuff done"
systemctl enable --now kubelet                  # --now both enables and starts the unit
yum versionlock kubelet kubeadm kubectl
echo "Kubelet started done"

# --- Kernel networking settings required by kube-proxy / CNI ----------------
cat <<'EOF' > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
echo "Network Stuff done"

# --- kubeadm join configuration ---------------------------------------------
# Token must match the bootstrap token in the master's InitConfiguration.
# NOTE(review): kubeConfigPath is relative, so run 'kubeadm join' from the
# directory containing discovery.yaml (or change this to an absolute path).
mkdir -p /etc/kubernetes
tee /etc/kubernetes/kubeadminitworker.yaml >/dev/null <<'EOF'
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
  file:
    kubeConfigPath: discovery.yaml
  timeout: 5m0s
  tlsBootstrapToken: y7yaev.9dvwxx6ny4ef8vlq
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: vsphere
EOF
echo "Kubeadm config ready"
# pvc.yaml — PersistentVolumeClaim used to verify vSphere storage connectivity.
# Relies on the cluster's default StorageClass (sc.yaml marks vsan-default as
# default), so no explicit storageClassName is needed.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
# sc.yaml — StorageClass backed by the in-tree vSphere volume provisioner.
# Volumes are provisioned on vsanDatastore with the named vSAN storage policy.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: vsan-default
  annotations:
    # Make this the cluster-wide default so PVCs without an explicit
    # storageClassName (e.g. pvc.yaml) bind to it.
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/vsphere-volume
parameters:
  storagePolicyName: "vSAN Default Storage Policy"
  datastore: vsanDatastore
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment