@Vertiwell
Created November 24, 2021 21:42
k8s_deployment.sh
#!/bin/bash
### Deploying k8s using Ansible for Debian/Ubuntu based OS
## Baseline Guide: https://buildvirtual.net/deploy-a-kubernetes-cluster-using-ansible/
# Type of Deployment: Self - Baremetal
### Minimum Requirements ###
## This deployment requires at least 4 nodes: one master and three workers, so that apps deployed to the workers can maintain a quorum.
## Each node must allow passwordless SSH and passwordless sudo for a non-root user with root privileges; add the following on each node:
# printf "username ALL=(ALL) NOPASSWD: ALL\n" >> /etc/sudoers
#
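## Optional pre-flight check (a rough sketch, not part of the deployment itself): confirm that
## passwordless SSH and passwordless sudo work for your user on every node before continuing,
## for example (substitute your own node IPs and username):
# for ip in 192.168.1.70 192.168.1.71 192.168.1.72 192.168.1.73; do ssh username@$ip 'sudo -n true && echo "sudo OK on $(hostname)"'; done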
## The following base packages are required:
# Ansible, which needs to be installed on the master node, from where all Ansible commands will be run:
apt-get install ansible -y && \
#
## Set Variables:
# Set a version for Kubernetes, kubectl and Kubeadm, versions available here: https://kubernetes.io/releases/
VERSION=1.22.3
#
### Installation Steps ###
## Provide a list of host IP addresses for the cluster nodes (MASTER = first bootstrap control node, CONTROL = additional control nodes, WORKER = worker nodes):
MASTERIP=192.168.1.70 && \
CONTROLIP=( ) && \
WORKERIP=( 192.168.1.71 192.168.1.72 192.168.1.73 ) && \
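## If you have additional control-plane nodes, list them in CONTROLIP the same way, e.g. (example addresses only):
# CONTROLIP=( 192.168.1.74 192.168.1.75 ) && \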
## Provide a user with root privileges (not root itself) to SSH to the other nodes:
## If you don't yet have such a user on each node, create one with this command:
# adduser username && usermod -aG sudo username
echo "Provide the user (with root privileges, but not root itself) to ssh to the other nodes:"
read varuser
## Create an SSH key pair for that user and copy it to the master node
ssh-keygen -q -t rsa -N '' <<< $'\ny' >/dev/null 2>&1 && \
ssh-copy-id $varuser@$MASTERIP && \
# Create Ansible inventory file
printf "[master]\nmaster0 ansible_host="$MASTERIP" ansible_user="$varuser"\n\n[masters]\n" > /etc/ansible/hosts && \
# Load the control node IP addresses into the Ansible inventory file
COUNTER=1
for ip in "${CONTROLIP[@]}" ; do
ssh-copy-id $varuser@$ip
printf "master%s ansible_host=%s ansible_user=%s\n" "$COUNTER" "$ip" "$varuser" >> /etc/ansible/hosts
COUNTER=$((COUNTER + 1))
done
printf "\n\n[workers]\n" >> /etc/ansible/hosts && \
COUNTER=1
for ip in "${WORKERIP[@]}" ; do
ssh-copy-id $varuser@$ip
printf "worker%s ansible_host=%s ansible_user=%s\n" "$COUNTER" "$ip" "$varuser" >> /etc/ansible/hosts
COUNTER=$((COUNTER + 1))
done
# This parameter makes sure the remote server uses the /usr/bin/python3 Python 3 executable instead of /usr/bin/python (Python 2.7), which is not present on recent Ubuntu versions.
printf "\n[all:vars]\nansible_python_interpreter=/usr/bin/python3" >> /etc/ansible/hosts && \
# Test Ansible connectivity to all nodes
ansible -i /etc/ansible/hosts all -m ping && \
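## Each host should report SUCCESS with "ping": "pong"; an UNREACHABLE result usually means
## ssh-copy-id or the passwordless sudo entry was missed on that node.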
# Implement an Ansible Playbook to install k8s
cat <<EOF >/etc/ansible/install-k8s.yml
---
- name: Install containerd and Kubernetes packages on all nodes
  hosts: "master, masters, workers"
  remote_user: $varuser
  become: yes
  become_method: sudo
  become_user: root
  gather_facts: yes
  connection: ssh
  tasks:
    - name: Create containerd config file
      file:
        path: "/etc/modules-load.d/containerd.conf"
        state: "touch"
    - name: Add conf for containerd
      blockinfile:
        path: "/etc/modules-load.d/containerd.conf"
        block: |
          overlay
          br_netfilter
    - name: Load kernel modules
      shell: |
        modprobe overlay
        modprobe br_netfilter
    - name: Set system configurations for Kubernetes networking
      file:
        path: "/etc/sysctl.d/99-kubernetes-cri.conf"
        state: "touch"
    - name: Add sysctl settings for Kubernetes networking
      blockinfile:
        path: "/etc/sysctl.d/99-kubernetes-cri.conf"
        block: |
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1
          net.bridge.bridge-nf-call-ip6tables = 1
    - name: Apply new settings
      command: sysctl --system
    - name: Install containerd
      shell: |
        apt update && apt install -y containerd
        mkdir -p /etc/containerd
        containerd config default | tee /etc/containerd/config.toml
        systemctl restart containerd
    - name: Disable swap (now and on reboot)
      shell: |
        swapoff -a
        sed -i '/ swap / s/^/#/' /etc/fstab
    - name: Install and configure dependencies
      shell: |
        apt update && apt install -y apt-transport-https curl
        curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
    - name: Create kubernetes repo file
      file:
        path: "/etc/apt/sources.list.d/kubernetes.list"
        state: "touch"
    - name: Add K8s source
      blockinfile:
        path: "/etc/apt/sources.list.d/kubernetes.list"
        block: |
          deb https://apt.kubernetes.io/ kubernetes-xenial main
    - name: Install kubernetes
      shell: |
        apt update
        apt install -y kubelet=$VERSION-00 kubeadm=$VERSION-00 kubectl=$VERSION-00
        apt-mark hold kubelet kubeadm kubectl
EOF
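## Optionally sanity-check the playbook before running it (--syntax-check is a standard ansible-playbook flag):
# ansible-playbook /etc/ansible/install-k8s.yml -i /etc/ansible/hosts --syntax-check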
# Run the k8s playbook
ansible-playbook /etc/ansible/install-k8s.yml -i /etc/ansible/hosts && \
# Implement an Ansible Playbook to bootstrap the master k8s node
cat <<EOF >/etc/ansible/install-k8s-cluster.yml
- hosts: master
  remote_user: $varuser
  become: yes
  become_method: sudo
  become_user: root
  tasks:
    - name: Initialize the cluster
      shell: kubeadm init --apiserver-advertise-address=$MASTERIP --pod-network-cidr=10.43.0.0/16 --cri-socket /run/containerd/containerd.sock
    - name: Create the .kube directory
      file:
        path: $HOME/.kube
        state: directory
    - name: KUBECONFIG file - make it available to the user
      shell: |
        cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        chown $(id -u):$(id -g) $HOME/.kube/config
    - name: Download the Calico manifest
      get_url:
        url: https://docs.projectcalico.org/manifests/calico.yaml
        dest: /tmp/calico.yaml
    - name: Install Calico networking
      shell: |
        cd /tmp
        sed '/ "type": "calico-ipam"/a\ },\n "container_settings": {\n "allow_ip_forwarding": true' calico.yaml > calico-custom.yaml
        sed '/ # value: "192.168.0.0\/16"/a\ - name: CALICO_IPV4POOL_CIDR\n value: "10.43.0.0\/16"' calico-custom.yaml > calico.yaml
        kubectl apply -f calico.yaml
        kubectl apply -f https://docs.projectcalico.org/manifests/calicoctl.yaml
    - name: Get the token for joining the worker nodes
      shell: kubeadm token create --print-join-command
      register: kubernetes_join_command
    - name: Copy the join command to a local file
      become: yes
      local_action: copy content="{{ kubernetes_join_command.stdout_lines[0] }}" dest="/tmp/kubernetes_join_command" mode=0777
EOF
# Run the k8s playbook
ansible-playbook /etc/ansible/install-k8s-cluster.yml -i /etc/ansible/hosts && \
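## Optional checks after the bootstrap playbook (run on the master): the control-plane node
## should be listed and, once the Calico pods are up, report Ready:
# kubectl get nodes
# kubectl get pods -n kube-system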
# Implement an Ansible Playbook to join the control k8s nodes to the cluster
cat <<EOF >/etc/ansible/join-k8s-control.yml
- hosts: masters
  become: yes
  gather_facts: yes
  tasks:
    - name: Copy the join command from the Ansible host to the control nodes
      become: yes
      copy:
        src: /tmp/kubernetes_join_command
        dest: /tmp/kubernetes_join_command
        mode: 0777
    - name: Join the control nodes to the cluster
      become: yes
      command: sh /tmp/kubernetes_join_command
      register: joined_or_not
EOF
# Run the k8s playbook if there are control nodes specified
if [ ${#CONTROLIP[@]} -eq 0 ]
then
echo "No Additional Control Nodes to Join (Skipping...)"
else
ansible-playbook /etc/ansible/join-k8s-control.yml -i /etc/ansible/hosts
fi
# Implement an Ansible Playbook to join the worker k8s nodes to the cluster
cat <<EOF >/etc/ansible/join-k8s-worker.yml
- hosts: workers
  become: yes
  gather_facts: yes
  tasks:
    - name: Copy the join command from the Ansible host to the worker nodes
      become: yes
      copy:
        src: /tmp/kubernetes_join_command
        dest: /tmp/kubernetes_join_command
        mode: 0777
    - name: Join the worker nodes to the cluster
      become: yes
      command: sh /tmp/kubernetes_join_command
      register: joined_or_not
EOF
# Run the k8s playbook
ansible-playbook /etc/ansible/join-k8s-worker.yml -i /etc/ansible/hosts && echo "Waiting for nodes to come up.." && sleep 30 && \
# Evict non-critical pods from the control node(s) so application workloads run only on the workers
kubectl taint nodes --selector=node-role.kubernetes.io/master CriticalAddonsOnly=true:NoExecute
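## Optional check: confirm the workers joined and the taint was applied to the control node(s):
# kubectl get nodes -o wide
# kubectl describe nodes | grep Taints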
# To wipe everything and start over: kubeadm reset; apt-get purge kubeadm kubectl kubelet kubernetes-cni kube*; apt-get autoremove; sudo rm -rf ~/.kube