Skip to content

Instantly share code, notes, and snippets.

@c3d
Created November 26, 2020 17:52
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save c3d/55adcde7ce93cd09bd04361299c6d288 to your computer and use it in GitHub Desktop.
Save c3d/55adcde7ce93cd09bd04361299c6d288 to your computer and use it in GitHub Desktop.
Simple Cluster Start script
#!/bin/bash
#
# Simple cluster start script: resets every node over ssh, re-runs
# kubeadm init/join, then re-applies networking and workload manifests.
#
# Control-plane host(s). Space-separated list; the unquoted expansions
# below rely on word-splitting, so keep hostnames whitespace-free.
MASTERS=shuttle
# Worker hosts, same space-separated convention.
WORKERS="muse big"
# Log and execute a command on the remote node named by the global $HOST.
# Globals:   HOST (read) - target hostname, set by the caller's loop
# Arguments: the remote command line (passed through to ssh verbatim)
# Returns:   ssh's exit status (i.e. the remote command's status)
run() {
    # "$*" (not "$@") inside the message string: we want one joined word.
    echo "Sending host $HOST command '$*'"
    ssh "root@$HOST" "$@"
}
# Print an error message and abort the whole script.
# Arguments: the message words (joined with single spaces)
# Diagnostics go to stderr so they are not swallowed by piped/tee'd stdout.
die() {
    printf '%s\n' "$*" >&2
    exit 1
}
# Workaround for an apparent race condition: unless cni0 is probed on every
# node, some nodes come up on the default 10.88.0.0 CNI network instead of
# 10.244.0.0, and pods then fail complaining that cni0 is misconfigured.
# Dumps the cni0 interface on each master and worker via run().
check_cni() {
    printf '%s\n' '####### CHECKING CNI ##########'
    # Unquoted on purpose: split the host lists into individual names.
    # HOST must stay a global — run() reads it.
    for HOST in $MASTERS $WORKERS; do
        run "ifconfig cni0"
    done
    printf '%s\n' '####### DONE CHECKING CNI ##########'
}
# Shutdown anything that is presently running
# Kill the local kubectl proxy (started at the end of a previous run) and
# any other lingering kubectl processes on this machine.
echo "Killing proxy and other kubectl processes"
killall kubectl
echo "Shutting down the cluster nodes: $MASTERS $WORKERS"
# Tear down every node: reset kubeadm state, take down the CNI and docker
# bridges, delete cni0 so it is recreated with the right subnet, remove the
# flannel CNI config, and update the kubernetes packages.
# NOTE(review): workers are iterated before masters here — presumably so the
# control plane is the last thing torn down; confirm that ordering matters.
for HOST in $WORKERS $MASTERS; do
run "kubeadm reset -f"
run "ifconfig cni0 down"
run "ifconfig docker0 down"
run "brctl delbr cni0"
run "rm -f /etc/cni/net.d/10-flannel.conflist"
run "dnf update -y --disableexcludes=kubernetes kubectl kubeadm kubelet"
done
check_cni
# Start the kubeadm init
# Initialize the control plane on each master, logging the full output so the
# join command can be extracted from /tmp/cluster-start.log afterwards.
# Without pipefail, `run ... | tee ... || die` would test tee's exit status,
# not kubeadm's — so check PIPESTATUS[0] explicitly to catch a failed init.
for HOST in $MASTERS; do
run "kubeadm init --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=Swap" | tee /tmp/cluster-start.log
[ "${PIPESTATUS[0]}" -eq 0 ] || die "Error in kubeadm init, retry"
done
check_cni
# Extract join command
# kubeadm init prints the "kubeadm join ..." command (continued with a
# trailing backslash) at the end of its output; strip the backslashes and let
# the unquoted inner echo flatten the two lines into one command string.
# NOTE(review): fragile — assumes the join command is exactly the last two
# lines of /tmp/cluster-start.log; confirm against the kubeadm version in use.
JOIN=$(echo $(tail -2 /tmp/cluster-start.log | sed -e 's|\\||g') --ignore-preflight-errors=Swap)
echo "Will join the cluster using $JOIN"
# Join the workers
# Run the captured join command on every worker; abort on the first failure.
for HOST in $WORKERS; do
run "$JOIN" || die "Host $HOST did not join correctly"
done
check_cni
# Copy the configuration file
# Fetch the admin kubeconfig from the master so local kubectl commands below
# can reach the new cluster. With multiple masters the last one copied wins.
echo "Copying the configuration files"
for HOST in $MASTERS; do
mkdir -p ~/.kube
scp root@$HOST:/etc/kubernetes/admin.conf ~/.kube/config || die "Could not find the configuration file"
done
check_cni
# Check the nodes
# Sanity check: list nodes to confirm the kubeconfig works and nodes joined.
kubectl get nodes
check_cni
# Apply Flannel
# Pod network add-on; its CIDR must match --pod-network-cidr=10.244.0.0/16
# used at init time above.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
check_cni
# Apply Kubernetes Dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
#kubectl apply -f ~/k8s/kubernetes-dashboard-cluster-role-binding.yaml
#kubectl apply -f ~/k8s/kubernetes-dashboard-admin-user.yaml
# Local dashboard customizations — presumably this manifest creates the
# "ddd-admin" service account looked up below; confirm.
kubectl apply -f ~/k8s/kubernetes-dashboard.yaml
check_cni
# Get the admin that was just created
# Find the ddd-admin secret name, then pull its bearer token for dashboard
# login. $ADMIN_SECRET is intentionally a single token, so unquoted use works.
ADMIN_SECRET=$(kubectl -n kubernetes-dashboard get secrets -o custom-columns=:metadata.name | grep ddd-admin)
SECRET_TOKEN=$(kubectl -n kubernetes-dashboard describe secret $ADMIN_SECRET | grep token:)
echo "To access the dashboard, run 'kubectl proxy' and then "
echo "open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy"
echo "To login on the dash board, use secret $SECRET_TOKEN"
check_cni
echo "###"
echo "Starting the proxy locally"
# Background proxy for dashboard access; PID is not captured, so a later run
# relies on the `killall kubectl` at the top to clean it up.
kubectl proxy &
check_cni
# Required NFS bindings
kubectl apply -f ~/k8s/nfs.yaml
kubectl apply -f ~/k8s/nfs-claim.yaml
# Kata RBAC and runtime class
kubectl apply -f ~/k8s/kata-rbac.yaml
kubectl apply -f ~/k8s/kata-qemu-runtimeClass.yaml
# Deploy Kata
kubectl apply -f ~/k8s/kata-deploy.yaml
# Jenkins - Kata version
kubectl apply -f ~/k8s/jenkins-kata-deployment.yaml
kubectl apply -f ~/k8s/jenkins-kata-service.yaml
#
# Handy one-liner (kept for reference): list all NodePorts in the cluster.
# kubectl get svc --all-namespaces -o go-template='{{range .items}}{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{.}}{{"\n"}}{{end}}{{end}}{{end}}'
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment