[Kubectl] Kubectl #kubectl #k8s
## 1:
# Count the Number of Nodes That Are Ready to Run Normal Workloads
k get nodes -o=custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*]'
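# a possible follow-up to get the count directly, assuming the only taint effect in play is NoSchedule
k get nodes -o=custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*].effect' --no-headers | grep -cv NoSchedule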
# Retrieve Error Messages from a Container Log
k logs data-handler -n backend | grep ERROR > /k8s/0002/errors.txt
# Find the pod with the label app=auth in the web namespace that is using the most CPU
k top pods -n web --selector=app=auth --sort-by=cpu
## 2:
# Edit the Web Frontend Deployment to Expose the HTTP Port
spec:
  containers:
  - image: nginx:1.14.2
    ports:
    - containerPort: 80
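# the snippet above belongs under spec.template.spec; assuming the deployment is named web-frontend in the web namespace, edit it with
k edit deployment web-frontend -n web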
# Create a Service to Expose the Web Frontend Deployment's Pods Externally
apiVersion: v1
kind: Service
metadata:
  name: web-frontend-svc
  namespace: web
spec:
  type: NodePort
  selector:
    app: web-frontend
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
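# optional check: confirm the NodePort service exists, then hit the node port (the node IP is environment specific)
k get svc web-frontend-svc -n web
curl http://<node-ip>:30080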
# Scale Up the Web Frontend Deployment
kubectl scale deployment web-frontend -n web --replicas=5
# Create an Ingress That Maps to the New Service
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-frontend-ingress
  namespace: web
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-frontend-svc
            port:
              number: 80
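# optional check: confirm the ingress was created and picked up the backend service
k get ingress web-frontend-ingress -n web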
## 3:
# Create a Service Account
k create serviceaccount webautomation -n web
# Create a ClusterRole That Provides Read Access to Pods
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pod-reader
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
# Bind the ClusterRole to the Service Account to Only Read Pods in the web Namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rb-pod-reader
  namespace: web
subjects:
- kind: ServiceAccount
  name: webautomation
  namespace: web
roleRef:
  kind: ClusterRole
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
# verify
kubectl get pods -n web --as=system:serviceaccount:web:webautomation
## 4:
# Back Up the etcd Data
ssh etcd1
ETCDCTL_API=3 etcdctl snapshot save /home/cloud_user/etcd_backup.db \
--endpoints=https://etcd1:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
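# optional check: verify the snapshot file before restoring
ETCDCTL_API=3 etcdctl snapshot status /home/cloud_user/etcd_backup.db -w table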
# Restore the etcd Data from the Backup
sudo systemctl stop etcd
# Delete the existing etcd data:
sudo rm -rf /var/lib/etcd
sudo ETCDCTL_API=3 etcdctl snapshot restore /home/cloud_user/etcd_backup.db \
--initial-cluster etcd-restore=https://etcd1:2380 \
--initial-advertise-peer-urls https://etcd1:2380 \
--name etcd-restore \
--data-dir /var/lib/etcd
# Set database ownership:
sudo chown -R etcd:etcd /var/lib/etcd
sudo systemctl start etcd
ETCDCTL_API=3 etcdctl get cluster.name \
--endpoints=https://etcd1:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
## 5:
## Upgrade All Kubernetes Components on the Control Plane and Worker Nodes (to Kubernetes version 1.22.2)
# CONTROL PLANE:
kubeadm version
# obtain available versions of kubeadm
apt list -a kubeadm
# install the kubeadm version matching the target release (1.22.2-00 here)
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.22.2-00 && \
apt-mark hold kubeadm
kubectl drain acgk8s-control --ignore-daemonsets
kubeadm upgrade plan
sudo kubeadm upgrade apply v1.22.2
# for other control plane nodes:
# sudo kubeadm upgrade node
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.22.2-00 kubectl=1.22.2-00 && \
apt-mark hold kubelet kubectl
sudo systemctl daemon-reload
sudo systemctl restart kubelet
kubectl uncordon acgk8s-control
# WORKER:
kubectl drain acgk8s-worker1 --ignore-daemonsets
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.22.2-00 && \
apt-mark hold kubeadm
kubeadm upgrade node
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.22.2-00 kubectl=1.22.2-00 && \
apt-mark hold kubelet kubectl
systemctl daemon-reload
systemctl restart kubelet
kubectl uncordon acgk8s-worker1
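# optional check: confirm both nodes now report the upgraded kubelet version
kubectl get nodes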
## 6:
# Drain Worker Node 1
k drain acgk8s-worker1 --ignore-daemonsets --force --delete-emptydir-data=true
# Create a Pod That Will Only Be Scheduled on Nodes with a Specific Label
# add the disk=fast label to the node (k edit node acgk8s-worker2 also works)
k label node acgk8s-worker2 disk=fast
# create pod with node selector
apiVersion: v1
kind: Pod
metadata:
  name: fast-nginx
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
    ports:
    - containerPort: 80
  nodeSelector:
    disk: fast
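# assuming the manifest above is saved as fast-nginx.yml, apply it and confirm the pod landed on the labelled node
k apply -f fast-nginx.yml
k get pod fast-nginx -n dev -o wide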
## 7:
# Create a PersistentVolume called host-storage-pv in the acgk8s context and auth namespace. Configure this PersistentVolume so that volumes that use it can be expanded in the future
# Create a Pod That Uses the PersistentVolume for Storage
# Expand the PersistentVolumeClaim (the edit command is shown after the manifests below)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: localdisk
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true # so volumes that use it can be expanded in the future
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-storage-pv
spec:
  storageClassName: localdisk
  persistentVolumeReclaimPolicy: Recycle # so the PV can be automatically reused if all claims are deleted
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /etc/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: host-storage-pvc
  namespace: auth
spec:
  storageClassName: localdisk
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
---
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
  namespace: auth
spec:
  containers:
  - name: busybox
    image: busybox
    command: ['sh', '-c', 'while true; do echo success > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: pv-storage
      mountPath: /output
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: host-storage-pvc
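# Expand the PersistentVolumeClaim: edit the PVC and raise spec.resources.requests.storage (e.g. 100Mi -> 200Mi); this is allowed because the storage class sets allowVolumeExpansion: true
k edit pvc host-storage-pvc -n auth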
## 8:
# Create a NetworkPolicy That Denies All Access to the Maintenance Pod
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: foo
spec:
  podSelector:
    matchLabels:
      app: maintenance
  policyTypes:
  - Ingress
  - Egress
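# optional check: confirm the maintenance pod actually carries the app=maintenance label the policy selects on
k get pods -n foo --show-labels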
# Create a NetworkPolicy That Allows All Pods in the Users-Backend Namespace to Communicate with Each Other Only on a Specific Port
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np-users-backend-80
  namespace: users-backend
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          app: users-backend
    ports:
    - protocol: TCP
      port: 80
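# note: the namespaceSelector matches on a namespace label, so the users-backend namespace itself needs app=users-backend; if it is missing, add it
k label namespace users-backend app=users-backend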
## 9:
# Create a Multi-Container Pod
apiVersion: v1
kind: Pod
metadata:
  name: multi
  namespace: baz
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis
# Create a pod which uses a sidecar to expose the main container's log file to stdout
apiVersion: v1
kind: Pod
metadata:
  name: logging-sidecar
  namespace: baz
spec:
  containers:
  - name: busybox1
    image: busybox
    command: ['sh', '-c', 'while true; do echo Logging data > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: sharedvol
      mountPath: /output
  - name: sidecar
    image: busybox
    command: ['sh', '-c', 'tail -f /input/output.log']
    volumeMounts:
    - name: sharedvol
      mountPath: /input
  volumes:
  - name: sharedvol
    emptyDir: {}
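# optional check: the main container's log lines should now be visible via the sidecar's stdout
k logs logging-sidecar -n baz -c sidecar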
## 10:
# Determine which node is not ready
k get nodes
# Fix the issue:
ssh worker2
sudo journalctl -u kubelet
sudo systemctl enable kubelet
sudo systemctl start kubelet
sudo systemctl status kubelet
k get pv --sort-by='.spec.capacity.storage'
k delete -n beebox-mobile service beebox-auth-svc
k exec -n beebox-mobile quark -- cat /etc/key/key.txt
# Create yaml output via a dry-run
k create deployment my-deployment --image=nginx --dry-run=client -o yaml
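# the generated YAML can be redirected to a file, tweaked, and applied (the filename is just an example)
k create deployment my-deployment --image=nginx --dry-run=client -o yaml > my-deployment.yml
k apply -f my-deployment.yml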
# annotate object with the change:
k scale deployment my-deployment --replicas=5 --record
## explore pod resources
# check if metrics service is responsive
k get --raw /apis/metrics.k8s.io
## note that --sort-by orders the resources precisely even though the displayed CPU is heavily rounded (e.g. 0m/1m); the ordering is still correct
k top pod --all-namespaces --sort-by cpu
## Resources
k get pods -A -o=custom-columns='Namespace:.metadata.namespace','PodName:.metadata.name','RCPU:spec.containers[*].resources.requests.cpu','LCPU:spec.containers[*].resources.limits.cpu','RM:spec.containers[*].resources.requests.memory','LM:spec.containers[*].resources.limits.memory' --sort-by .metadata.namespace
k get pods -A -o=custom-columns='usage:.status'
### node resources
k top node
# selectors (labels)
kubectl get pods --selector=app=cassandra
# check services endpoints to easily verify where they point to
kubectl get endpoints <service object name>
# nslookup to test FQDN from a pod
kubectl exec busybox -- nslookup 10.104.162.248
# note that the FQDN is necessary to communicate across namespaces: <service-name>.<namespace>.svc.cluster.local
# within the same namespace, the service name alone is sufficient
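# example of the cross-namespace form, assuming a service called web-frontend-svc in the web namespace
kubectl exec busybox -- nslookup web-frontend-svc.web.svc.cluster.local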
# troubleshooting in a node
sudo journalctl -u kubelet
sudo journalctl -u docker
# cluster component logs live in /var/log/*.log (not for kubeadm clusters, as those services run in containers)
## kafka topics
k get kafkatopics -A -o custom-columns="TOPICNAME:.spec.topicName"
k get kafkatopics -A -o jsonpath='{.items[*].spec.topicName}'
## get images of various resources
k get po -A -o jsonpath="{ .items[*].spec.containers[*].image}" | tr " " "\n"
k get ds -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get sts -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get deploy -A -o jsonpath="{ .items[*].spec.template.spec.containers[*].image}" | tr " " "\n"
k get cronjobs -A -o jsonpath="{ .items[*].spec.jobTemplate.spec.template.spec.containers[*].image}" | tr " " "\n"