@MartinLuksik
Last active July 11, 2023 06:42
[Kubectl] Kubectl #kubectl #k8s
kubectl get secret -n myns mysecret -o jsonpath="{.data.tls\.crt}" | base64 -d | openssl x509 -text -noout
# https://kubernetes.io/docs/
# https://kubernetes.io/blog/
# https://helm.sh/docs
alias k=kubectl # will already be pre-configured
alias kn='kubectl config set-context --current --namespace '
export do="--dry-run=client -o yaml" # k create deploy nginx --image=nginx $do > pod.yaml
export now="--force --grace-period 0" # k delete pod x $now
## vim settings:
set tabstop=2
set expandtab
set shiftwidth=2
set number
# 'Shift+v' marks multiple lines; press > or < to indent the marked lines, and . to repeat the action
k -f my.yaml delete $now
k -f my_new.yaml create
# or in one go:
k -f my_new.yaml replace $now
## create yamls (labels have to be updated with vim)
k run mypod --image=busybox:1.31.0 $do --command -- sh -c "touch /tmp/ready && sleep 1d" > mypod.yaml
k -n myns create job neb-new-job --image=busybox:1.31.0 $do -- sh -c "sleep 2 && echo done" > /opt/course/3/job.yaml
## jsonpath filters
k get pod -o jsonpath='{.status.phase}'
k get pod -o jsonpath='{.items[*].metadata.annotations}' | tr " " "\n"
k get po -A -o custom-columns='NAME:.metadata.name,ANNOTATIONS:.metadata.annotations'
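# jsonpath also supports filter expressions; e.g. list pod names scheduled on a given node (the node name "node01" is just an example):
k get pod -A -o jsonpath='{.items[?(@.spec.nodeName=="node01")].metadata.name}' | tr " " "\n"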
## helm
helm ls -a # add -a to also list releases that are in a pending state
helm uninstall myreleasename
helm repo list # bitnami -> https://charts.bitnami.com/bitnami
helm repo update
helm search repo nginx # bitnami/nginx 9.5.2 1.21.1 Chart for the nginx server
helm upgrade myreleasename bitnami/nginx
helm rollback # helm rollback <RELEASE> [REVISION]
helm show values bitnami/apache | yq e
helm -n myns install myreleasename bitnami/apache --set replicaCount=3
## secret that belongs to sa
k get secrets -oyaml | grep annotations -A 1 # annotation kubernetes.io/service-account.name
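# sketch of a manually created SA token secret (since v1.24 these are no longer auto-created; the secret and SA names below are illustrative):
apiVersion: v1
kind: Secret
metadata:
  name: mysa-token # hypothetical name
  annotations:
    kubernetes.io/service-account.name: mysa # hypothetical SA name
type: kubernetes.io/service-account-token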
## container probes
```
readinessProbe:
  exec:
    command:
    - sh
    - -c
    - cat /tmp/ready
  initialDelaySeconds: 5
  periodSeconds: 10
```
```
livenessProbe:
  tcpSocket:
    port: # set to the container's listening port
  initialDelaySeconds: 10
  periodSeconds: 15
```
## rollout
k rollout history deploy mydeploy
k rollout undo deploy mydeploy
## container's security context
```
securityContext:
  allowPrivilegeEscalation: false
  privileged: false
```
## expose pod/deployment
k expose pod mypod --name mypod-svc --port 3333 --target-port 80 # easier than creating the svc manually because this also generates the label selectors
k get ep,svc
k run mytmppod --restart=Never --rm --image=nginx:alpine -i -- curl http://mypod-svc.myns:3333
k expose deployment mydeploy --name mydeploy-srv --port 9999 --target-port 80
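# roughly the Service that the expose command above generates (a sketch; the selector is copied from the deployment's pod labels, assumed here to be app: mydeploy):
apiVersion: v1
kind: Service
metadata:
  name: mydeploy-srv
spec:
  selector:
    app: mydeploy # assumed pod label
  ports:
  - port: 9999
    targetPort: 80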
## Docker
docker build -t abc:latest -t abc:v1-docker .
docker push abc:latest
docker push abc:v1-docker
docker image ls
## podman
podman image ls
podman build -t abc:v1-podman .
podman push abc:v1-podman
podman run -d --name abc abc:v1-podman
podman logs abc
podman ps > /containers
## PV
kind: PersistentVolume
apiVersion: v1
metadata:
  name: my-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/Volumes/Data"
## PVC
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: my-pvc
  namespace: myns
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
## pod spec to mount
volumes:
- name: data
  persistentVolumeClaim:
    claimName: my-pvc
containers:
- image: httpd
  name: container
  volumeMounts:
  - name: data
    mountPath: /tmp
## Storage classes
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: mysc
provisioner: my-provisioner
reclaimPolicy: Retain
## PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
  namespace: myns
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
  storageClassName: mysc
## secrets
k create secret generic mysecret --from-literal user=test --from-literal pass=pwd
## mount secret as volume:
volumes:
- name: sec-volume
  secret:
    secretName: mysecret
containers:
- volumeMounts:
  - name: sec-volume
    mountPath: /tmp/secret
## also secrets as env vars:
env:
- name: SECRET_USER
  valueFrom:
    secretKeyRef:
      name: mysecret
      key: user
## also all secrets as env vars where var name will be the same as in the secret:
envFrom:
- secretRef: # also works for configMapRef
    name: mysecret
## configmaps
k create configmap mycm --from-file=index.html=/opt/index.html #configure the key "index.html"!
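# to serve that file, mount the configmap into a pod; a sketch (volume name, image and mount path are illustrative):
volumes:
- name: web-content
  configMap:
    name: mycm
containers:
- name: web
  image: nginx:alpine
  volumeMounts:
  - name: web-content
    mountPath: /usr/share/nginx/html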
## shared volume between containers in a pod
volumes:
- name: logs
  emptyDir: {}
initContainers:
- name: init
  image: bash:5.0.11
  command: ['bash', '-c', 'echo init > /var/log/cleaner/cleaner.log']
  volumeMounts:
  - name: logs
    mountPath: /var/log/cleaner
## useful commands for troubleshooting
['bash', '-c', 'while true; do echo `date`: "something happened" >> /var/log/my.log; sleep 1; done']
["sh", "-c", "tail -f /var/log/my.log"]
['sh', '-c', 'echo "Lets get started!" > /tmp/web/index.html']
## svc nodeport config
.spec.ports[].nodePort: 30100 # otherwise assigned automatically from the node port range (default 30000-32767)
.spec.type: NodePort
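# the same settings in manifest form (a sketch; service name, selector and ports are illustrative):
apiVersion: v1
kind: Service
metadata:
  name: mysvc
spec:
  type: NodePort
  selector:
    app: myapp # assumed pod label
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30100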
## wget
k exec mypod -- wget -O- www.google.com
k run mypod --restart=Never --rm -i --image=busybox -- wget -O- mysvc:80
## egress to pods with labels app: api OR DNS
policyTypes:
- Egress
egress:
- to:
  - podSelector:
      matchLabels:
        app: api
- ports:
  - port: 53
    protocol: UDP
  - port: 53
    protocol: TCP
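# the snippet above is not applyable on its own; a minimal complete manifest could look like this (policy name, namespace and the frontend pod selector are assumptions):
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: egress-to-api-or-dns # hypothetical name
  namespace: myns
spec:
  podSelector:
    matchLabels:
      app: frontend # assumed: the pods this policy applies to
  policyTypes:
  - Egress
  egress:
  - to:
    - podSelector:
        matchLabels:
          app: api
  - ports:
    - port: 53
      protocol: UDP
    - port: 53
      protocol: TCP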
## pod labels & annotations
k get pod --show-labels
k get pod -l app=api
k label pod -l app=api important=true # add label important=true to pods with label app=api
k label pod -l "app in (api,ui)" secured=true # add label secured=true to pods with label app=api or app=ui
k annotate pod -l secured=true team="api team" # add annotation to pod with label secured=true
# Understand Rolling Update Deployment including maxSurge and maxUnavailable
kubectl rollout undo deployment/nginx-deployment --to-revision=2
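# the surge/unavailability budget lives under .spec.strategy of the Deployment; a sketch (replica count and budgets are illustrative):
spec:
  replicas: 4
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1        # at most 1 pod above the desired count during the rollout
      maxUnavailable: 1  # at most 1 pod below the desired count during the rollout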
## 1:
# Count the Number of Nodes That Are Ready to Run Normal Workloads
k get nodes -o=custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*]'
# Retrieve Error Messages from a Container Log
k logs data-handler -n backend | grep ERROR > /k8s/0002/errors.txt
# Find the pod with a label of app=auth in the web namespace that is utilizing the most CPU.
k top pods -n web --selector=app=auth --sort-by=cpu
## 2:
# Edit the Web Frontend Deployment to Expose the HTTP Port
spec:
  containers:
  - image: nginx:1.14.2
    ports:
    - containerPort: 80
# Create a Service to Expose the Web Frontend Deployment's Pods Externally
apiVersion: v1
kind: Service
metadata:
  name: web-frontend-svc
  namespace: web
spec:
  type: NodePort
  selector:
    app: web-frontend
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
# Scale Up the Web Frontend Deployment
kubectl scale deployment web-frontend -n web --replicas=5
# Create an Ingress That Maps to the New Service
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-frontend-ingress
  namespace: web
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-frontend-svc
            port:
              number: 80
## 3:
# Create a Service Account
k create serviceaccount webautomation -n web
# Create a ClusterRole That Provides Read Access to Pods
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pod-reader
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
# Bind the ClusterRole to the Service Account to Only Read Pods in the web Namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rb-pod-reader
  namespace: web
subjects:
- kind: ServiceAccount
  name: webautomation
  namespace: web # namespace is required for ServiceAccount subjects
roleRef:
  kind: ClusterRole
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
# verify
kubectl get pods -n web --as=system:serviceaccount:web:webautomation
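# another quick check with auth can-i (same service account as above):
kubectl auth can-i list pods -n web --as=system:serviceaccount:web:webautomation   # expect: yes
kubectl auth can-i delete pods -n web --as=system:serviceaccount:web:webautomation # expect: no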
## 4:
# Back Up the etcd Data
ssh etcd1
ETCDCTL_API=3 etcdctl snapshot save /home/cloud_user/etcd_backup.db \
--endpoints=https://etcd1:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
# Restore the etcd Data from the Backup
sudo systemctl stop etcd
# Delete the existing etcd data:
sudo rm -rf /var/lib/etcd
sudo ETCDCTL_API=3 etcdctl snapshot restore /home/cloud_user/etcd_backup.db \
--initial-cluster etcd-restore=https://etcd1:2380 \
--initial-advertise-peer-urls https://etcd1:2380 \
--name etcd-restore \
--data-dir /var/lib/etcd
# Set database ownership:
sudo chown -R etcd:etcd /var/lib/etcd
sudo systemctl start etcd
ETCDCTL_API=3 etcdctl get cluster.name \
--endpoints=https://etcd1:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
## 5:
## Upgrade all Kubernetes components on the control plane node and the worker to version 1.22.2
# CONTROL PLANE:
kubeadm version
# obtain available versions of kubeadm
apt list -a kubeadm
# install the target kubeadm version (1.22.2 per the task above)
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.22.2-00 && \
apt-mark hold kubeadm
kubectl drain acgk8s-control --ignore-daemonsets
kubeadm upgrade plan
sudo kubeadm upgrade apply v1.22.2
# for other control plane nodes:
# sudo kubeadm upgrade node
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.22.2-00 kubectl=1.22.2-00 && \
apt-mark hold kubelet kubectl
sudo systemctl daemon-reload
sudo systemctl restart kubelet
kubectl uncordon acgk8s-control
# WORKER:
kubectl drain acgk8s-worker1 --ignore-daemonsets
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.22.2-00 && \
apt-mark hold kubeadm
kubeadm upgrade node
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.22.2-00 kubectl=1.22.2-00 && \
apt-mark hold kubelet kubectl
systemctl daemon-reload
systemctl restart kubelet
kubectl uncordon acgk8s-worker1
## 6:
# Drain Worker Node 1
k drain acgk8s-worker1 --ignore-daemonsets --force --delete-emptydir-data=true
# Create a Pod That Will Only Be Scheduled on Nodes with a Specific Label
# add label to the node
k edit node acgk8s-worker2
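# or label it directly without an editor (assumes the disk=fast label used by the pod below):
k label node acgk8s-worker2 disk=fast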
# create pod with node selector
apiVersion: v1
kind: Pod
metadata:
  name: fast-nginx
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
    ports:
    - containerPort: 80
  nodeSelector:
    disk: fast
## 7:
# Create a PersistentVolume called host-storage-pv in the acgk8s context and auth namespace. Configure this PersistentVolume so that volumes that use it can be expanded in the future
# Create a Pod That Uses the PersistentVolume for Storage
# Expand the PersistentVolumeClaim (see the patch command after the manifests below)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: localdisk # referenced by the PV and PVC below as storageClassName
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true # allows volumes using this class to be expanded in the future
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-storage-pv
spec:
  storageClassName: localdisk
  persistentVolumeReclaimPolicy: Recycle # lets the PV be automatically reused once all claims to it are deleted
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /etc/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: host-storage-pvc
  namespace: auth
spec:
  storageClassName: localdisk
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
---
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
  namespace: auth
spec:
  containers:
  - name: busybox
    image: busybox
    command: ['sh', '-c', 'while true; do echo success > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: pv-storage
      mountPath: /output
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: host-storage-pvc
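# to expand the claim afterwards, patch (or edit) the requested storage; a sketch, the new size is illustrative and the storage class must allow expansion:
kubectl patch pvc host-storage-pvc -n auth --patch '{"spec":{"resources":{"requests":{"storage":"200Mi"}}}}'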
## 8:
# Create a NetworkPolicy That Denies All Access to the Maintenance Pod
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: foo
spec:
  podSelector:
    matchLabels:
      app: maintenance
  policyTypes:
  - Ingress
  - Egress
# Create a NetworkPolicy That Allows All Pods in the Users-Backend Namespace to Communicate with Each Other Only on a Specific Port
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np-users-backend-80
  namespace: users-backend
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          app: users-backend
    ports:
    - protocol: TCP
      port: 80
## 9:
# Create a Multi-Container Pod
apiVersion: v1
kind: Pod
metadata:
  name: multi
  namespace: baz
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis
# Create a pod which uses a sidecar to expose the main container's log file to stdout
apiVersion: v1
kind: Pod
metadata:
  name: logging-sidecar
  namespace: baz
spec:
  containers:
  - name: busybox1
    image: busybox
    command: ['sh', '-c', 'while true; do echo Logging data > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: sharedvol
      mountPath: /output
  - name: sidecar
    image: busybox
    command: ['sh', '-c', 'tail -f /input/output.log']
    volumeMounts:
    - name: sharedvol
      mountPath: /input
  volumes:
  - name: sharedvol
    emptyDir: {}
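# the main container's log can then be read through the sidecar:
k logs logging-sidecar -n baz -c sidecar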
## 10:
# Determine which node is not ready
k get nodes
# Fix the issue:
ssh worker2
sudo journalctl -u kubelet
sudo systemctl enable kubelet
sudo systemctl start kubelet
sudo systemctl status kubelet
k get pv --sort-by=.spec.capacity.storage
k delete -n beebox-mobile service beebox-auth-svc
k exec -n beebox-mobile quark -- cat /etc/key/key.txt
# Create yaml output via a dry-run
k create deployment my-deployment --image=nginx --dry-run=client -o yaml # plain --dry-run is deprecated; specify client or server
# annotate object with the change:
k scale deployment my-deployment --replicas=5 --record
## explore pod resources
# check if metrics service is responsive
k get --raw /apis/metrics.k8s.io
## note that --sort-by sorts the resources precisely even though the displayed CPU is heavily rounded (0m/1m); the ordering is still correct
k top pod --all-namespaces --sort-by cpu
## Resources
k get pods -A -o=custom-columns='Namespace:.metadata.namespace','PodName:.metadata.name','RCPU:spec.containers[*].resources.requests.cpu','LCPU:spec.containers[*].resources.limits.cpu','RM:spec.containers[*].resources.requests.memory','LM:spec.containers[*].resources.limits.memory' --sort-by .metadata.namespace
k get pods -A -o=custom-columns='usage:.status'
### node resources
k top node
# selectors (labels)
kubectl get pods --selector=app=cassandra
# check services endpoints to easily verify where they point to
kubectl get endpoints <service object name>
# nslookup to test FQDN from a pod
kubectl exec busybox -- nslookup 10.104.162.248
# note that the FQDN is required for cross-namespace communication: <service-name>.<namespace>.svc.cluster.local
# within the same namespace the service name alone is sufficient
# troubleshooting in a node
sudo journalctl -u kubelet
sudo journalctl -u docker
# cluster component logs live in /var/log/*.log (not on kubeadm clusters, where the control plane components run as containers)
# delete all failed pods - useful after jobs failed
kubectl delete pods --field-selector status.phase=Failed -n foo
## kafka topics
k get kafkatopics -A -o custom-columns="TOPICNAME:.spec.topicName"
k get kafkatopics -A -o jsonpath='{.items[*].spec.topicName}'
## get images of various resources
k get po -A -o jsonpath="{ .items[*].spec.containers[*].image}" | tr " " "\n"
k get ds -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get sts -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get deploy -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get cronjobs -A -o jsonpath="{ .items[*].spec.jobTemplate.spec.template.spec.containers[*].image}" | tr " " "\n"
# scale down
kubectl scale statefulset mq-0 --replicas=0
kubectl patch pvc mad-rmq-data --patch '{"spec":{"resources":{"requests":{"storage":"1024Gi"}}}}'
#pv needs to be reattached to a VM to be resized, so just scale up the statefulset again
kubectl scale statefulset mq-0 --replicas=1