Skip to content

Instantly share code, notes, and snippets.

@Vertiwell
Created November 12, 2021 03:51
Show Gist options
  • Save Vertiwell/d32fc3550dabdf07f2b0ee23f7224b86 to your computer and use it in GitHub Desktop.
k3s_deployment.sh
#!/bin/bash
### Deploying k3s for Debian/Ubuntu based OS
## Baseline Guide: https://github.com/alexellis/k3sup
# Type of Deployment: Self - Baremetal
### Minimum Requirements ###
## This deployment requires at least 4 nodes: one to be the controller and three to be workers,
## so that apps deployed to the workers can form a quorum.
## Each node must have passwordless SSH for a sudo-capable user (not root itself);
## add the following on each node to grant passwordless sudo:
# printf "username ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
#
### Installation Steps ###
## Provide a version of K3S to use (List is here: https://update.k3s.io/v1-release/channels):
VERSION=v1.22.3+k3s1
## Provide list of host IP addresses for cluster nodes. MASTERIP is the bootstrap control node;
## add further control-plane nodes to CONTROLIP (leave empty if none); provide a minimum of 3 agent node IPs:
MASTERIP=192.168.1.169
CONTROLIP=( )
AGENTIP=( 192.168.1.156 192.168.1.153 192.168.1.189 )
## Provide a root user (not root) to ssh to other nodes:
## If you don't yet have a user on each node, create one with this command:
# adduser username && usermod -aG sudo username
echo "Provide the root user (not root) to ssh to other nodes:"
# -r: take the username literally (no backslash-escape interpretation)
read -r varuser
## Create a private RSA key for the user, non-interactively; output suppressed.
## The piped "\ny" answers the overwrite prompt if a key already exists.
ssh-keygen -q -t rsa -N '' <<< $'\ny' >/dev/null 2>&1
# Copy the SSH IDs to every node and record the control/worker IPs in files
# for the join loops below. -f: don't error when the files don't exist yet (first run).
rm -f cnodes wnodes
# Master
ssh-copy-id "$varuser@$MASTERIP"
# Control-plane nodes. Use a distinct loop variable: the original reused the
# array name as the scalar loop variable, which clobbers the array.
for ip in "${CONTROLIP[@]}"; do
  ssh-copy-id "$varuser@$ip"
  echo "$ip" >> cnodes
done
# Worker (agent) nodes
for ip in "${AGENTIP[@]}"; do
  ssh-copy-id "$varuser@$ip"
  echo "$ip" >> wnodes
done
#
## The following base packages are required for this installation:
# k3sup, k3s installation tool — chain with && so a failed download doesn't
# fall through to installing a stale/missing binary.
curl -sLS https://get.k3sup.dev | sh && install k3sup /usr/local/bin/ && sleep 5 && k3sup version
# kubectl, to interact with the k3s cluster (latest stable release for linux/amd64)
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
### Installation loops
## Master Node: tainted CriticalAddonsOnly so app workloads schedule onto workers;
## servicelb/traefik/flannel/network-policy disabled because Calico is installed below.
k3sup install --ip "$MASTERIP" --user "$varuser" --cluster --k3s-version "$VERSION" --k3s-extra-args '--node-taint CriticalAddonsOnly=true:NoExecute --cluster-init --disable servicelb --disable traefik --flannel-backend=none --disable-network-policy --cluster-cidr=10.43.0.0/16'
# Set Kubeconfig for kubectl. Create ~/.kube first: cp fails if the directory is missing.
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config && chmod go-r ~/.kube/config && sleep 10
# Add the network layer to allow containers to talk between each other: https://www.projectcalico.org/
# The sed edits enable IP forwarding in the CNI config and pin the pool CIDR to the cluster CIDR above.
wget https://docs.projectcalico.org/manifests/calico.yaml && \
sed '/ "type": "calico-ipam"/a\ },\n "container_settings": {\n "allow_ip_forwarding": true' calico.yaml > calico-custom.yaml && \
sed '/ # value: "192.168.0.0\/16"/a\ - name: CALICO_IPV4POOL_CIDR\n value: "10.43.0.0\/16"' calico-custom.yaml > calico.yaml && \
kubectl apply -f calico.yaml
kubectl apply -f https://docs.projectcalico.org/manifests/calicoctl.yaml
# Wait for Calico to be up before moving on, takes about 1min.
# n must start at 0: unset, "[ $n -eq 300 ]" is a test syntax error and the cap never fires.
# The status command runs once per iteration in the until condition (the original ran it twice).
ROLLOUT_STATUS_CMD="kubectl rollout status -w --timeout=90s deployment/calico-kube-controllers -n kube-system"
n=0
until $ROLLOUT_STATUS_CMD || [ "$n" -eq 300 ]; do
  n=$((n + 1))
  sleep 5
done
# Control Nodes in addition to the Master Node.
# Guard on the file: when CONTROLIP was left empty, cnodes is never created and
# "done < cnodes" would error with "No such file or directory".
if [ -f cnodes ]; then
  while IFS="" read -r p || [ -n "$p" ]; do
    k3sup join --ip "$p" --user "$varuser" --server-user "$varuser" --server-ip "$MASTERIP" --server --k3s-version "$VERSION" --k3s-extra-args '--node-taint CriticalAddonsOnly=true:NoExecute --disable traefik --disable servicelb' && \
    sleep 5 && echo "$p Installed"
  done < cnodes
fi
# Worker Nodes
while IFS="" read -r p || [ -n "$p" ]; do
  k3sup join --ip "$p" --user "$varuser" --server-user "$varuser" --server-ip "$MASTERIP" --k3s-version "$VERSION" && \
  sleep 5 && echo "$p Installed"
done < wnodes
# Test
kubectl get nodes
# Cleanup. -f: some of these files may not exist (e.g. cnodes with no extra control nodes).
rm -f calico.yaml calico-custom.yaml kubeconfig cnodes wnodes
# Wipe everything: /usr/local/bin/k3s-uninstall.sh on each control node, /usr/local/bin/k3s-agent-uninstall.sh on each worker node
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment