Last active
April 18, 2018 15:29
-
-
Save ruzickap/c86b0b54274020d90f1ea6b4af56b941 to your computer and use it in GitHub Desktop.
Install Kubernetes Multinode Cluster using kubeadm
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### Master node installation

# SSH to the first VM which will be your Master node:
ssh root@node1

# Set the Kubernetes version which will be installed:
KUBERNETES_VERSION="1.10.0"

# Set the proper CNI URL (Flannel manifest matching this Kubernetes release):
CNI_URL="https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml"

# For Flannel installation you need to use the proper "pod-network-cidr"
# (Flannel's manifest expects 10.244.0.0/16 by default):
POD_NETWORK_CIDR="10.244.0.0/16"

# Add the Kubernetes repository (details):
apt-get update -qq && apt-get install -y -qq apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
tee /etc/apt/sources.list.d/kubernetes.list << EOF2
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF2

# Install necessary packages (pin kubelet/kubeadm/kubectl to the chosen version):
apt-get update -qq
apt-get install -y -qq docker.io "kubelet=${KUBERNETES_VERSION}-00" "kubeadm=${KUBERNETES_VERSION}-00" "kubectl=${KUBERNETES_VERSION}-00"

# Install Kubernetes Master:
kubeadm init --pod-network-cidr="${POD_NETWORK_CIDR}" --kubernetes-version "v${KUBERNETES_VERSION}"

# Copy the "kubectl" config files to the home directory:
mkdir -p "${HOME}/.kube"
cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
chown -R "${USER}:${USER}" "${HOME}/.kube"

# Install CNI:
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl apply -f "${CNI_URL}"

# Your Kubernetes Master node should be ready now. You can check it using this command:
kubectl get nodes
### Worker nodes installation

# Let's connect the worker nodes now.
# SSH to the worker nodes and repeat these commands on all of them in parallel:
ssh root@node2
ssh root@node3
ssh root@node4

# Set the Kubernetes version which will be installed (must match the master):
KUBERNETES_VERSION="1.10.0"

# Add the Kubernetes repository (details):
apt-get update -qq && apt-get install -y -qq apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
tee /etc/apt/sources.list.d/kubernetes.list << EOF2
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF2

# Install necessary packages (pin kubelet/kubeadm/kubectl to the chosen version):
apt-get update -qq
apt-get install -y -qq docker.io "kubelet=${KUBERNETES_VERSION}-00" "kubeadm=${KUBERNETES_VERSION}-00" "kubectl=${KUBERNETES_VERSION}-00"
exit
# All the worker nodes are prepared now - let's connect them to the master node.
# SSH to the master node again and generate the "joining" command:
ssh root@node1 "kubeadm token create --print-join-command"

# You should see something like:
# -> kubeadm join <master-ip>:<master-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>

# Execute the generated command on all worker nodes...
ssh -t root@node2 "kubeadm join --token ... ... ... ... ... ..."
ssh -t root@node3 "kubeadm join --token ... ... ... ... ... ..."
ssh -t root@node4 "kubeadm join --token ... ... ... ... ... ..."

# SSH back to the master node and check the cluster status - all the nodes
# should appear there in "Ready" status after a while.
ssh root@node1

# Check nodes:
kubectl get nodes
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment