@GuyBarros
Created November 28, 2023 20:46
Multipass CKA single script set up
multipass launch -m2G -c2 -d5G -n "k8scp" lts --network "en0" 
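Once the instance is up, open a shell in it; everything in the K8SCP section below runs inside that VM:

multipass shell k8scp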

K8SCP

wget https://cm.lf.training/LFS258/LFS258_V2023-09-14_SOLUTIONS.tar.xz --user=LFtraining --password=Penguin2014

tar -xvf LFS258_V2023-09-14_SOLUTIONS.tar.xz

sudo su
echo "update apt and install dependencies"
apt-get update && apt-get upgrade -y
apt-get install -y vim curl apt-transport-https vim git wget software-properties-common lsb-release ca-certificates net-tools bash-completion

echo "turn off swap"
swapoff -a
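swapoff -a only lasts until the next reboot. A minimal sketch to make it persistent, assuming the swap entry lives in /etc/fstab as on stock Ubuntu images:

sed -i.bak '/\sswap\s/ s/^/#/' /etc/fstab   # comment out any swap line, keeping a .bak copy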

echo "Update kernel networking to allow necessary traffic."
modprobe overlay
modprobe br_netfilter

cat << EOF | tee /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

echo "Ensure the changes are used by the current kernel as well"
sysctl --system
echo "Install the necessary key for the software to container.d"

mkdir -p /etc/apt/keyrings

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

apt-get update && apt-get install containerd.io -y
containerd config default | tee /etc/containerd/config.toml
sed -e 's/SystemdCgroup = false/SystemdCgroup = true/g' -i /etc/containerd/config.toml
systemctl restart containerd
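Optional sanity check that containerd restarted cleanly and picked up the cgroup change:

systemctl is-active containerd                   # should print "active"
grep SystemdCgroup /etc/containerd/config.toml   # should now show SystemdCgroup = true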

echo "Add a new repo for kubernetes."
cat << EOF | tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF

echo "Add a GPG key for the k8s packages."
curl -s \
https://packages.cloud.google.com/apt/doc/apt-key.gpg \
| apt-key add -

echo "update Apt (again)"
apt-get update

echo "install K8S v 1.27.1-00 to practise upgrading"
apt-get install -y kubeadm=1.27.1-00 kubelet=1.27.1-00 kubectl=1.27.1-00

echo "add autocompletion to kubectl"
source <(kubectl completion bash)

echo "source <(kubectl completion bash)" >> $HOME/.bashrc
echo "add ip fron enp0s2 to /etc/hosts as k8scp"

echo "Get the IPv4 address from interface enp0s1"
ip_address=$(ifconfig enp0s1 | awk '/inet / {print $2}' | cut -d':' -f2)

echo "Check if an IP address is obtained"
if [ -z "$ip_address" ]; then
  echo "Error: Unable to obtain the IP address from enp0s2."
  exit 1
fi

echo "Specify the FQDN"
fqdn="k8scp"

echo "Add the IP address and FQDN to /etc/hosts"
echo "$ip_address $fqdn" | sudo tee -a /etc/hosts
echo "127.0.0.1 localhost" | sudo tee -a /etc/hosts


echo "Display a message"
echo "Successfully added $fqdn with IP address $ip_address to /etc/hosts."


echo "create the kubeadm config that will be used to install kubernetes"
cat << EOF | tee kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.27.1
controlPlaneEndpoint: "k8scp:6443"
networking:
    podSubnet: 192.168.0.0/16
EOF
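Optionally, the config can be exercised without changing the node first; kubeadm init supports a dry run that prints what it would do:

kubeadm init --config=kubeadm-config.yaml --dry-run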


echo "start K8s controlplane remember to save the join token"
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.out 

exit

Join command saved from the kubeadm init output (token and hash are specific to this cluster):

kubeadm join k8scp:6443 --token 2g0o5b.5xl31pw12a870pog --discovery-token-ca-cert-hash sha256:694fa5104d0c5268b24dce17c4ac4272a221c0386cb04f479f8bffcb37872d85
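Bootstrap tokens expire after 24 hours by default, so if the saved command stops working a fresh one can be printed on the control plane with:

sudo kubeadm token create --print-join-command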

make sure you aren't in a root shell (exit back to the regular user) before configuring kubectl

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
less .kube/config

kubectl get pods --all-namespaces

kubectl apply -f $(find $HOME -name cilium-cni.yaml)
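A hedged check that the CNI came up and the node goes Ready (the k8s-app=cilium label is what the upstream manifest uses; verify against the bundled cilium-cni.yaml):

kubectl -n kube-system get pods -l k8s-app=cilium
kubectl get nodes   # STATUS should move from NotReady to Ready once Cilium is running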

source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> $HOME/.bashrc

$SHELL

Add another node

multipass launch -m2G -c2 -d5G -n "worker" lts --network "en0" 
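Open a shell in the new instance; the rest of this section runs inside it:

multipass shell worker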
wget https://cm.lf.training/LFS258/LFS258_V2023-09-14_SOLUTIONS.tar.xz --user=LFtraining --password=Penguin2014

tar -xvf LFS258_V2023-09-14_SOLUTIONS.tar.xz

sudo su
echo "update apt and install dependencies"
apt-get update && apt-get upgrade -y
apt-get install -y vim curl apt-transport-https vim git wget software-properties-common lsb-release ca-certificates net-tools bash-completion

echo "turn off swap"
swapoff -a

echo "Update kernel networking to allow necessary traffic."
modprobe overlay
modprobe br_netfilter

cat << EOF | tee /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

echo "Ensure the changes are used by the current kernel as well"
sysctl --system

echo "Install the necessary key for the software to container.d"

mkdir -p /etc/apt/keyrings

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

apt-get update && apt-get install containerd.io -y
containerd config default | tee /etc/containerd/config.toml
sed -e 's/SystemdCgroup = false/SystemdCgroup = true/g' -i /etc/containerd/config.toml
systemctl restart containerd

echo "Add a new repo for kubernetes."
cat << EOF | tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF

echo "Add a GPG key for the k8s packages."
curl -s \
https://packages.cloud.google.com/apt/doc/apt-key.gpg \
| apt-key add -

echo "update Apt (again)"
apt-get update

echo "install K8S v 1.27.1-00 to practise upgrading"
apt-get install -y kubeadm=1.27.1-00 kubelet=1.27.1-00 kubectl=1.27.1-00

echo "add autocompletion to kubectl"
source <(kubectl completion bash)

echo "source <(kubectl completion bash)" >> $HOME/.bashrc

apt-mark hold kubeadm kubelet kubectl
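To confirm the packages are pinned before the upgrade lab:

apt-mark showhold   # should list kubeadm, kubectl and kubelet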
echo "add ip fron enp0s2 to /etc/hosts as worker"

echo "Get the IPv4 address from interface enp0s1"
ip_address=$(ifconfig enp0s1 | awk '/inet / {print $2}' | cut -d':' -f2)

echo "Check if an IP address is obtained"
if [ -z "$ip_address" ]; then
  echo "Error: Unable to obtain the IP address from enp0s2."
  exit 1
fi

echo "Specify the FQDN"
fqdn="worker"

echo "Add the IP address and FQDN to /etc/hosts"
echo "$ip_address $fqdn" | sudo tee -a /etc/hosts
echo "127.0.0.1 localhost" | sudo tee -a /etc/hosts
echo "192.168.68.9 k8scp" | sudo tee -a /etc/hosts


echo "Display a message"
echo "Successfully added $fqdn with IP address $ip_address to /etc/hosts."

run the kubeadm join command saved from the kubeadm init output (as root on the worker) to join the node to the cluster
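In practice that means running the command saved earlier (the token and hash differ per cluster):

sudo kubeadm join k8scp:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>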

finish the setup

untaint CP node

kubectl describe node | grep -i taint
kubectl get pods --all-namespaces

kubectl taint nodes --all node-role.kubernetes.io/control-plane-

Persistent Volumes and Persistent Volumes Claims

on k8scp node:

sudo mkdir /opt/sfw
sudo chmod 1777 /opt/sfw/
sudo bash -c 'echo software > /opt/sfw/hello.txt'

Persistent Volume YAML:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-pv
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/opt/sfw"

Persistent Volume Claim YAML:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi

Pod with Persistent Volume YAML:

apiVersion: v1
kind: Pod
metadata:
  name: example-pod
spec:
  volumes:
    - name: my-pv-storage
      persistentVolumeClaim:
        claimName: example-pv-claim
  containers:
    - name: example-container
      image: nginx
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: my-pv-storage
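A minimal sketch for applying and checking the three objects above (the filenames are assumptions; use whatever the YAML was saved as):

kubectl apply -f example-pv.yaml -f example-pvc.yaml -f example-pod.yaml
kubectl get pv,pvc                                     # the claim should show STATUS Bound
kubectl exec example-pod -- ls /usr/share/nginx/html   # shows hello.txt if the pod landed on k8scp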

create a pod with two containers that share an emptyDir volume

apiVersion: v1
kind: Pod
metadata:
  name: sharedvol-pod
spec:
  containers:
    - name: container1
      image: nginx
      volumeMounts:
        - name: shared-volume
          mountPath: /data
    - name: container2
      image: busybox
      command: ["/bin/sh", "-c", "while true; do echo Hello from Container 2 >> /data/index.html; sleep 10; done"]
      volumeMounts:
        - name: shared-volume
          mountPath: /data
  volumes:
    - name: shared-volume
      emptyDir: {}

check that the shared volume is working:

kubectl exec -it sharedvol-pod -c container1 -- /bin/sh
cd /data
cat index.html

Services

nginx with ports YAML (nginx-one.yaml)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-one
  labels:
    system: secondary
  namespace: accounting
spec:
  selector:
    matchLabels:
      system: secondary
  replicas: 2
  template:
    metadata:
      labels:
        system: secondary
    spec:
      containers:
      - image: nginx:1.20.1
        imagePullPolicy: Always
        name: nginx
        ports:
        - containerPort: 80
          protocol: TCP
      nodeSelector:
        system: secondOne

create namespace accounting

kubectl create namespace accounting

label worker node

kubectl label node <worker_node_name> system=secondOne

deploy the yaml file

kubectl apply -f nginx-one.yaml

create a service to expose the containerPort

kubectl -n accounting expose deployment nginx-one

get the endpoint && test with curl

kubectl -n accounting get ep nginx-one
curl 192.168.1.18:80

expose as a NodePort service named "service-lab"

kubectl -n accounting expose deployment nginx-one --type=NodePort --name=service-lab
kubectl -n accounting describe services
kubectl -n accounting get svc service-lab
curl http://k8scp:30878
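The 30878 above is just the port that happened to be assigned; the actual NodePort can be read back with a jsonpath query:

kubectl -n accounting get svc service-lab -o jsonpath='{.spec.ports[0].nodePort}'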

use coreDNS

nettools.yaml

apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
spec:
  containers:
  - name: ubuntu
    image: ubuntu:latest
    command: ["sleep"]
    args: ["infinity"]

exec into the ubuntu pod, update apt, and install curl and dnsutils

kubectl exec -it ubuntu -- /bin/bash
apt-get update ; apt-get install -y curl dnsutils
dig
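With dnsutils installed, cluster DNS can be exercised directly from inside the pod, e.g. against the API server's service record (assuming the default cluster.local domain):

dig kubernetes.default.svc.cluster.local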

edit the coredns config map to add another FQDN

data:
  Corefile: |
    .:53 {
        rewrite stop { #<- add these lines 
          name regex (.*)\.test\.io {1}.default.svc.cluster.local  #<- add these lines
          answer name (.*)\.default\.svc\.cluster\.local {1}.test.io  #<- add these lines
        }  #<- add these lines
        errors
        health {
           lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
           max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2023-11-26T18:39:44Z"
  name: coredns
  namespace: kube-system
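The ConfigMap is edited in place; CoreDNS reloads the Corefile on its own (the reload plugin is enabled above), but a rollout restart forces it immediately. The test.io rewrite can then be checked from the ubuntu pod:

kubectl -n kube-system edit configmap coredns
kubectl -n kube-system rollout restart deployment coredns
kubectl exec -it ubuntu -- dig kubernetes.test.io   # should resolve via the rewrite to kubernetes.default.svc.cluster.local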

managing resources with labels

kubectl delete pods -l system=secondary --all-namespaces
kubectl -n accounting get pods
kubectl -n accounting get deploy --show-labels
kubectl -n accounting delete deploy -l system=secondary
kubectl label node worker system-

Helm

install helm from apt

curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
helm search hub database
helm repo add ealenn https://ealenn.github.io/charts
helm repo update
helm upgrade -i tester ealenn/echo-server --debug
helm list
helm uninstall tester
helm repo add bitnami https://charts.bitnami.com/bitnami
helm fetch bitnami/apache --untar
cd apache/
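With the chart untarred, values.yaml can be inspected and the chart installed from the local copy; a sketch (the release name and the service.type override are assumptions about the chart's values layout):

less values.yaml
helm install local-apache . --set service.type=NodePort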

Service Mesh

linkerd

curl -sL run.linkerd.io/install | sh
export PATH=$PATH:/home/ubuntu/.linkerd2/bin
echo "export PATH=$PATH:/home/ubuntu/.linkerd2/bin" >> $HOME/.bashrc
linkerd check --pre
linkerd install --crds | kubectl apply -f -
linkerd install | kubectl apply -f -
linkerd check
linkerd viz install | kubectl apply -f -
linkerd viz check
linkerd viz dashboard &

did not work :(

Ingress Controller

helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
helm fetch nginx-stable/nginx-ingress --untar
helm install myingress nginx-stable/nginx-ingress --set controller.service.type=NodePort --set controller.service.httpPort.nodePort=30000 --set controller.service.httpsPort.nodePort=30443 --set controller.kind=daemonset
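A rough check that the controller pods and the NodePort service exist (resource names derive from the release name, so adjust if they differ):

kubectl get pods,svc | grep myingress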

vim ingress.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    nginx.ingress.kubernetes.io/service-upstream: "true"
  namespace: default
spec:
  ingressClassName: nginx
  rules:
  - host: www.external.com
    http:
      paths:
      - backend:
          service:
            name: secondapp
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific
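Apply it and test by sending the Host header to the NodePort opened by the controller (the secondapp service on port 80 is assumed to already exist):

kubectl apply -f ingress.yaml
curl -H "Host: www.external.com" http://k8scp:30000/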