
Install Kubernetes

Master node

$ apt-get update && apt-get upgrade -y

$ apt-get install -y vim

$ apt-get install -y docker.io ; systemctl enable docker.service

$ vim /etc/apt/sources.list.d/kubernetes.list

deb http://apt.kubernetes.io/ kubernetes-xenial main

$ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -

$ apt-get update

$ apt-get install -y kubeadm=1.18.1-00 kubelet=1.18.1-00 kubectl=1.18.1-00

$ apt-mark hold kubelet kubeadm kubectl

$ wget https://docs.projectcalico.org/manifests/calico.yaml

- name: CALICO_IPV4POOL_CIDR
  value: "192.168.1.0/16"

$ ip addr show

$ vim /etc/hosts

192.168.0.61 k8smaster

$ vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: 1.18.1
controlPlaneEndpoint: "k8smaster:6443"
networking:
  podSubnet: 192.168.1.0/16

$ kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.out

$ mkdir -p $HOME/.kube

$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

$ less .kube/config

apiVersion: v1
clusters:
- cluster: 
<output_omitted>

$ sudo cp /root/calico.yaml .

$ kubectl apply -f calico.yaml

$ sudo apt-get install bash-completion -y

$ source <(kubectl completion bash)

$ echo "source <(kubectl completion bash)" >> $HOME/.bashrc

$ sudo kubeadm config print init-defaults

Worker node

$ sudo -i

$ apt-get update && apt-get upgrade -y

$ apt-get install -y docker.io ; systemctl enable docker.service

$ ip addr show    (run on the master node to get its IP)

$ kubeadm token list    (run on the master node)

$ kubeadm token create    (run on the master node, if no valid token exists)

$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'    (run on the master node)
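
Alternatively, kubeadm can assemble the whole join command, token and CA cert hash included; run this on the master node and paste its output on the worker:

$ kubeadm token create --print-join-command    (run on the master node)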

$ vim /etc/hosts

10.128.0.3 k8smaster

$ vim /etc/apt/sources.list.d/kubernetes.list

deb http://apt.kubernetes.io/ kubernetes-xenial main

$ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -

$ apt-get update

$ apt-get install -y kubeadm=1.18.1-00 kubelet=1.18.1-00 kubectl=1.18.1-00

$ kubeadm join --token 6qfu3u.de1f2e3v3en99b51 k8smaster:6443 --discovery-token-ca-cert-hash sha256:c8c524f6ddde3ffa639b458febcc957137002a73e75976bce40183a9636eaa53

$ kubectl get nodes    (run on the master node)

NAME              STATUS   ROLES    AGE     VERSION
ip-192-168-0-75   Ready    <none>   77s     v1.18.1
ip-192-168-0-81   Ready    master   3h33m   v1.18.1

Finish Cluster Setup

$ kubectl get node

$ kubectl describe node ip-192-168-0-81 (master node)

$ kubectl describe node | grep -i taint

$ kubectl taint nodes --all node-role.kubernetes.io/master- (Allow the master server to run non-infrastructure pods)

$ kubectl describe node | grep -i taint

$ kubectl taint nodes --all node.kubernetes.io/not-ready-

$ kubectl get pods --all-namespaces

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-854c58bf56-mf7vm   1/1     Running   0          3h39m
kube-system   calico-node-dtvfw                          1/1     Running   0          3h39m
kube-system   calico-node-vgjlj                          1/1     Running   0          8m31s
kube-system   coredns-66bff467f8-9ljbl                   1/1     Running   0          3h40m
kube-system   coredns-66bff467f8-f5l4j                   1/1     Running   0          3h40m
kube-system   etcd-ip-192-168-0-81                       1/1     Running   0          3h40m
kube-system   kube-apiserver-ip-192-168-0-81             1/1     Running   0          3h40m
kube-system   kube-controller-manager-ip-192-168-0-81    1/1     Running   0          3h40m
kube-system   kube-proxy-rdhx5                           1/1     Running   0          8m31s
kube-system   kube-proxy-rpdkw                           1/1     Running   0          3h40m
kube-system   kube-scheduler-ip-192-168-0-81             1/1     Running   0          3h40m

$ kubectl -n kube-system delete pod coredns-66bff467f8-9ljbl coredns-66bff467f8-f5l4j

Deploy an Application

$ kubectl create deployment nginx --image=nginx

$ kubectl get deployments

$ kubectl describe deployment nginx

$ kubectl get events

$ kubectl get deployment nginx -o yaml

$ kubectl get deployment nginx -o yaml > first.yaml

$ vim first.yaml    (remove the creationTimestamp, resourceVersion, selfLink, and uid lines)

$ kubectl delete deployment nginx

$ kubectl create -f first.yaml

$ kubectl get deployment nginx -o yaml > second.yaml

$ diff first.yaml second.yaml
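
As an alternative to diffing two exported files, kubectl diff compares a local file against the live object via a server-side dry run:

$ kubectl diff -f first.yaml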

$ kubectl create deployment two --image=nginx --dry-run=client -o yaml

$ kubectl get deployment

$ kubectl get deployments nginx -o yaml

$ kubectl get deployment nginx -o json

$ kubectl expose -h

$ kubectl expose deployment/nginx

$ vim first.yaml

    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
        ports:                     # Add these three lines
        - containerPort: 80
          protocol: TCP

$ kubectl replace -f first.yaml

$ kubectl get deploy,pod

$ kubectl expose deployment/nginx

$ kubectl get svc nginx

$ kubectl get ep nginx
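
To see the full Service object that expose generated, including the assigned ClusterIP, selector, and port mapping:

$ kubectl get svc nginx -o yaml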

$ kubectl describe pod nginx-7cbc4b4d9c-d27xw | grep Node:

$ sudo tcpdump -i tunl0

$ curl 10.100.61.122:80

$ curl 192.168.1.5:80

$ kubectl get deployment nginx

$ kubectl scale deployment nginx --replicas=3

$ kubectl get deployment nginx

$ kubectl get ep nginx

$ kubectl get pod -o wide

$ kubectl delete pod nginx-1423793266-7f1qw

$ kubectl get po

$ kubectl get ep nginx

$ curl 10.100.61.122:80

Access from Outside the Cluster

$ kubectl get po

$ kubectl exec nginx-1423793266-13p69 -- printenv | grep KUBERNETES

$ kubectl get svc

$ kubectl delete svc nginx

$ kubectl expose deployment nginx --type=LoadBalancer

$ kubectl get svc

Open a browser on your local system and visit the public IP of your node on port 32753 (the NodePort assigned to the service).
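
If you need just the assigned NodePort rather than the whole table, a jsonpath query extracts it (the port number, 32753 above, will differ on your cluster):

$ kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}'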

$ kubectl scale deployment nginx --replicas=0

$ kubectl get po

$ kubectl scale deployment nginx --replicas=2

$ kubectl get po

$ kubectl delete deployments nginx

$ kubectl delete ep nginx

$ kubectl delete svc nginx

Kubernetes Architecture

The Kubernetes master runs various server and manager processes for the cluster. Among the components of the master node are

  • the kube-apiserver
  • the kube-scheduler
  • the etcd database
  • the cloud-controller-manager, which takes over tasks once handled by the kube-controller-manager, interacting with third-party tools such as Rancher or DigitalOcean for cluster management and reporting

Basic Node Maintenance

Backup The etcd Database

$master> grep data-dir /etc/kubernetes/manifests/etcd.yaml

$master> kubectl -n kube-system exec -it etcd-<master-node-name> -- sh    (complete the pod name, e.g. etcd-ip-192-168-0-81 from kubectl get pods)

 # etcdctl -h
 # find . -name etcd
 # cd /etc/kubernetes/pki/etcd
 # ETCDCTL_API=3 etcdctl --cert=./peer.crt --key=./peer.key --cacert=./ca.crt --endpoints=https://127.0.0.1:2379 endpoint health
 # ETCDCTL_API=3 etcdctl --cert=./peer.crt --key=./peer.key --cacert=./ca.crt --endpoints=https://127.0.0.1:2379 member list
 # ETCDCTL_API=3 etcdctl --cert=./peer.crt --key=./peer.key --cacert=./ca.crt --endpoints=https://127.0.0.1:2379 snapshot save /var/lib/etcd/snapshot.db
 # ls -l /var/lib/etcd/
 # exit

$master> ls -l /var/lib/etcd/

$master> mkdir $HOME/backup

$master> cp /var/lib/etcd/snapshot.db $HOME/backup/snapshot.db-$(date +%m-%d-%y)

$master> cp /root/kubeadm-config.yaml $HOME/backup/

$master> cp -r /etc/kubernetes/pki/etcd $HOME/backup/
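
To verify the copied snapshot is intact, etcdctl can report its hash, revision, and key count. This assumes etcdctl is installed on the host, which kubeadm does not do by default; otherwise run the same check inside the etcd pod as above:

$master> ETCDCTL_API=3 etcdctl --write-out=table snapshot status $HOME/backup/snapshot.db-$(date +%m-%d-%y)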

Upgrade the Cluster

$master> apt update; apt-cache madison kubeadm; apt-mark unhold kubeadm; apt-get install -y kubeadm=1.19.0-00; apt-mark hold kubeadm

$master> kubeadm version

$master> kubectl drain ip-192-168-0-103 --ignore-daemonsets    (master node)

$master> kubeadm upgrade plan

$master> kubeadm upgrade apply v1.19.0

$master> kubectl get node

$master> apt-mark unhold kubelet kubectl ; apt-get install -y kubelet=1.19.0-00 kubectl=1.19.0-00; apt-mark hold kubelet kubectl

$master> systemctl daemon-reload; systemctl restart kubelet

$master> kubectl get node

$master> kubectl uncordon ip-192-168-0-103 (master)

$master> kubectl get node

$worker> apt-mark unhold kubeadm ; apt-get update && apt-get install -y kubeadm=1.19.0-00; apt-mark hold kubeadm

$master> kubectl drain ip-192-168-0-174 --ignore-daemonsets    (worker node)

$worker> kubeadm upgrade node

$worker> apt-mark unhold kubelet kubectl; apt-get install -y kubelet=1.19.0-00 kubectl=1.19.0-00; apt-mark hold kubelet kubectl

$worker> systemctl daemon-reload; systemctl restart kubelet

$master> kubectl get node

$master> kubectl uncordon ip-192-168-0-174 (worker)

$master> kubectl get nodes

Working with CPU and Memory Constraints

$master> kubectl create deployment hog --image vish/stress

$master> kubectl get deployments

$master> kubectl describe deployment hog

$master> kubectl get deployment hog -o yaml

$master> kubectl get deployment hog -o yaml > hog.yaml

$master> vim hog.yaml    (remove the status output, creationTimestamp, and other cluster-generated settings)

        imagePullPolicy: Always
        name: hog
        resources:                # Edit to remove {}
          limits:                 # Add these 4 lines
            memory: "4Gi"
          requests:
            memory: "2500Mi"
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File

$master> kubectl replace -f hog.yaml

$master> kubectl get deployment hog -o yaml

$master> kubectl get po

$master> kubectl logs hog-64cbfcc7cf-lwq66

$master> vim hog.yaml

resources:
  limits:
    cpu: "1"
    memory: "4Gi"
  requests:
    cpu: "0.5"
    memory: "500Mi"
args:
- -cpus
- "2"
- -mem-total
- "950Mi"
- -mem-alloc-size
- "100Mi"
- -mem-alloc-sleep
- "1s"

$master> kubectl delete deployment hog

$master> kubectl create -f hog.yaml

$master> kubectl get pod

$master> kubectl logs hog-1985182137-5bz2w
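
If the metrics-server add-on is installed (it is not part of this lab's setup), kubectl top shows whether actual usage stays within the limits set above:

$master> kubectl top pod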

Resource Limits for a Namespace
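
A minimal sketch of a per-namespace default, using a LimitRange object: it applies default requests and limits to every container created in its namespace. The namespace name low-usage-limit and the values below are illustrative, not from the original notes:

$master> kubectl create namespace low-usage-limit

$master> vim low-resource-range.yaml

apiVersion: v1
kind: LimitRange
metadata:
  name: low-resource-range
spec:
  limits:
  - default:
      cpu: 1
      memory: 500Mi
    defaultRequest:
      cpu: 0.5
      memory: 100Mi
    type: Container

$master> kubectl -n low-usage-limit create -f low-resource-range.yaml

$master> kubectl -n low-usage-limit get limitrange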
