bookmarks and snippets
[url "https://oauth2:{{ token }}@gitlab.xxx.ru/"]
insteadOf = https://gitlab.xxx.ru/
[core]
excludesfile = ~/.gitignore
attributesfile = ~/.gitattributes
[alias]
lg = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit
lg1 = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(bold yellow)%d%C(reset)' --all
lg2 = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold cyan)%aD%C(reset) %C(bold green)(%ar)%C(reset)%C(bold yellow)%d%C(reset)%n'' %C(white)%s%C(reset) %C(dim white)- %an%C(reset)' --all
lg3 = log --graph --all --date=relative --pretty=format:'%Cred%h %Creset%<|(100,trunc)%s %C(bold blue)<%an>%Creset %Cgreen(%cd)%Creset%C(auto)%d'
br = branch --format='%(HEAD) %(color:yellow)%(refname:short)%(color:reset) - %(contents:subject) %(color:green)(%(committerdate:relative)) [%(authorname)]' --sort=-committerdate
conflict = diff --name-only --diff-filter=U
fire = !sh -c \"git checkout -b wip/fire/ptishkov && \\ git stage -A && \\ git commit -m wip-fire-alert && \\ git push origin wip/fire/ptishkov\"\n
lf = log -p -M --follow --stat --
wdiff = diff --word-diff
cdiff = diff --color-words
[blame]
coloring = highlightRecent
[color "blame"]
highlightRecent = blue,12 month ago,white,1 month ago,red
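## usage: a plain `git blame <file>` now colors lines by age: older than 12 months blue, 1-12 months white, newer than 1 month red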
[diff "ansible-vault"]
textconv = ansible-vault view
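## textconv only applies to paths marked with the "ansible-vault" diff attribute;
## a matching ~/.gitattributes entry might look like this (hypothetical pattern):
##   *.vault.yml diff=ansible-vault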
[color]
	ui = 1
[help]
	autocorrect = 0
# ansible.cfg
## add per-task timestamps (profile_tasks goes under the [defaults] section)
[defaults]
callback_whitelist = profile_tasks
# tests
- name: Check required input parameters
  assert:
    that:
      - "({{ item }} is defined)"
      - "({{ item }} is not none)"
      - "({{ item }} | trim != '')"
  with_items:
    - os_name
    - os_version
  loop_control:
    loop_var: item
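## a possible warning-free variant (sketch): index the vars dictionary instead of templating inside "that"; same checks
- name: Check required input parameters
  assert:
    that:
      - vars[item] is defined
      - vars[item] is not none
      - vars[item] | trim != ''
  with_items:
    - os_name
    - os_version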
# delete files in the folder except for the folder itself
- name: find files in dir
  find:
    paths: /test_folder
    patterns: "*"
  register: result

- name: remove files
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ result.files }}"
# is true
{% if some_var is defined and some_var is sameas true %}
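## `sameas true` matches only the boolean true, never merely truthy values:
## some_var=true renders "on"; some_var="yes" (a string) renders nothing
{% if some_var is defined and some_var is sameas true %}on{% endif %}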
#!/usr/bin/env bash
echo 'source <(kubectl completion bash)' >>~/.bashrc
echo 'alias k=kubectl' >>~/.bashrc
echo 'complete -o default -F __start_kubectl k' >>~/.bashrc
alias k=kubectl && complete -o default -F __start_kubectl k
echo 'set et sw=2 ts=2 sts=2' >>~/.vimrc
# minio client (mc)
curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc && chmod +x /usr/local/bin/mc
export MC_HOST_<alias>=https://<Access Key>:<Secret Key>:<Session Token>@<YOUR-S3-ENDPOINT>
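## usage sketch (hypothetical alias "mys3"; the session-token part is optional):
# export MC_HOST_mys3=https://ACCESSKEY:SECRETKEY@s3.example.com
# mc ls mys3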
# get latest release from github
curl -s https://api.github.com/repos/derailed/k9s/releases | jq '[.[].tag_name | select(.|test("rc|alpha")|not)]|sort|reverse|first' -r
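## generalized helper (sketch; takes an owner/repo argument):
latest_release() {
  curl -s "https://api.github.com/repos/$1/releases" \
    | jq -r '[.[].tag_name | select(test("rc|alpha")|not)] | sort | reverse | first'
}
latest_release derailed/k9s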
# crane
curl -L https://github.com/google/go-containerregistry/releases/download/v0.16.1/go-containerregistry_Linux_x86_64.tar.gz | tar -xz -C /usr/local/bin/ crane
# kubectl
curl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -o /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl
# k9s
curl -L https://github.com/derailed/k9s/releases/download/v0.32.4/k9s_Linux_amd64.tar.gz | tar -xz -C /usr/local/bin/ "k9s"
# crictl
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.27.0/crictl-v1.27.0-linux-amd64.tar.gz | tar -xz -C /usr/local/bin/
# grpcurl
curl -L https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz | tar -xz -C /usr/local/bin/
# helm 3
curl https://get.helm.sh/helm-v3.10.2-linux-amd64.tar.gz | tar -xz --strip 1 -C /usr/local/bin/
# kustomize
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv4.5.7/kustomize_v4.5.7_linux_amd64.tar.gz | tar -xz -C /usr/local/bin/
# stern
curl -L https://github.com/stern/stern/releases/download/v1.30.0/stern_1.30.0_linux_amd64.tar.gz | tar -xz -C /usr/local/bin/ "stern"
curl -L https://github.com/stern/stern/releases/download/v1.28.0/stern_1.28.0_linux_amd64.tar.gz | tar -xz -C .
# krew https://github.com/kubernetes-sigs/krew/releases/download/v0.4.4/krew-linux_amd64.tar.gz
curl -L https://github.com/kubernetes-sigs/krew/releases/download/v0.4.4/krew-linux_amd64.tar.gz | tar -xz -C /usr/local/bin/ "krew"
# goss
curl -L https://github.com/goss-org/goss/releases/download/v0.3.20/goss-linux-amd64 -o /usr/local/bin/goss && chmod +x /usr/local/bin/goss
# certinfo
curl -L https://github.com/pete911/certinfo/releases/download/v1.0.13/certinfo_1.0.13_linux_amd64.tar.gz | tar -zx -C /usr/local/bin/
# gomplate
curl -L https://github.com/hairyhenderson/gomplate/releases/download/v3.11.3/gomplate_linux-amd64-slim -o /usr/local/bin/gomplate
chmod +x /usr/local/bin/gomplate
# sd
curl -L https://github.com/chmln/sd/releases/download/v0.7.6/sd-v0.7.6-x86_64-unknown-linux-gnu -o /usr/local/bin/sd && chmod +x /usr/local/bin/sd
# kubevirt
export VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases | jq '[.[].tag_name | select(.|test("rc|alpha")|not)]|sort|reverse|first' -r)
curl -L https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-linux-amd64 -o /usr/local/bin/virtctl && chmod +x /usr/local/bin/virtctl
sudo bash -c "curl -L https://github.com/kubevirt/kubevirt/releases/download/v0.59.0/virtctl-v0.59.0-linux-amd64 -o /usr/local/bin/virtctl && chmod +x /usr/local/bin/virtctl"
# cilium-cli
curl -L https://github.com/cilium/cilium-cli/releases/download/v0.13.1/cilium-linux-amd64.tar.gz | tar -xz -C /usr/local/bin/
# hubble
HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
curl -Lsq https://github.com/cilium/hubble/releases/download/${HUBBLE_VERSION}/hubble-linux-amd64.tar.gz | tar -xz -C /usr/local/bin/ hubble
# istioctl
curl -Lsq https://github.com/istio/istio/releases/download/1.17.2/istioctl-1.17.2-linux-amd64.tar.gz | tar -xz -C ./ istioctl
echo 'source <(hubble completion bash)' >>~/.bashrc
echo 'source <(helm completion bash)' >>~/.bashrc
echo 'source <(kustomize completion bash)' >>~/.bashrc
source ~/.bashrc
# krew block
echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >>~/.bashrc
curl -L https://github.com/kubernetes-sigs/krew/releases/download/v0.4.4/krew-linux_amd64.tar.gz | tar -xz -C .
./krew-linux_amd64 install krew
rm krew-linux_amd64
kubectl krew install view-allocations
kubectl krew install ctx
kubectl krew install ns
kubectl krew install blame
curl -s https://api.github.com/repos/kubevirt/kubevirt/releases | jq '.[].tag_name | select(.|test("rc|alpha|beta")|not)' -r | sort -r | head -1
# common ===============================================================================================================
## delete all pods in crash loop backoff
kubectl get pods --all-namespaces --field-selector=status.phase=Running -o json | jq -r '.items[] | select(.status.containerStatuses[].state.waiting.reason=="CrashLoopBackOff") | "kubectl delete pod \(.metadata.name) -n \(.metadata.namespace)"' | sh
## get k8s resources without system fields (https://stackoverflow.com/questions/43941772/get-yaml-for-deployed-kubernetes-services)
kubectl -n d8-system get secret deckhouse-registry -o yaml | yq 'del(.metadata.resourceVersion, .metadata.uid, .metadata.annotations, .metadata.creationTimestamp, .metadata.selfLink, .metadata.managedFields)'
## watch for deployment statuses
kubectl -n echo-00-cilium get deployment -w -o json | jq '.status|del(.conditions)'
## get deployment container "deckhouse" image
kubectl -n d8-system get deploy/deckhouse -o json | jq '.spec.template.spec.containers[] | select( .image | test("deckhouse")) | .image'
## set image for deployment
kubectl -n d8-system set image deploy/deckhouse deckhouse=${IMAGE}
## patch deployment
kubectl patch deployment your_deployment --type merge -p '{"spec":{"template":{"spec":{"terminationGracePeriodSeconds":31}}}}'
## get ds images
kubectl -n d8-ingress-nginx get ds -o json | jq '.items[].spec.template.spec.containers[] | {name,image}' -c
## get pods images
kubectl -n d8-ingress-nginx get pods -o json | jq '.items[].spec.containers[]|{ name, image}' -c
## get image for containers with name
kubectl --context=dev0 -n d8-system get deploy deckhouse -o json | jq '.spec.template.spec.containers[] | select(.name=="deckhouse") | .image'
## get image and imageID
kubectl --context=dev1 -n d8-system get pods -l app=deckhouse -o json | jq '.items[].status.containerStatuses[] | select(.name=="deckhouse") | { image, imageID }'
## get all pods in not running phase for system components
kubectl get pods -A -o json | jq '.items[]|select(.metadata.namespace|test("(d8.+|kube-system)")) | select(.status.phase|test("Running")|not) | { ns: .metadata.namespace, name: .metadata.name, node: .spec.nodeName, phase: .status.phase }' -c
## get init or containers in not ready phase
kubectl get -n kube-system pods -o json | jq '.items[] | . as $item | .status | { name: $item | .metadata.name , init: [.initContainerStatuses // [] | .[] | select(.ready | not) | .name ], cont: [.containerStatuses // [] | .[] | select(.ready | not) | .name ] }' | jq '. | select((.init| length > 0) or (.cont| length >0))'
## get pod info in one line and sort by ip
kubectl get pods -A -o json | jq '.items[] | [.status.podIP, .spec.nodeName, .metadata.namespace, .metadata.name] | join(" ")' -r | sort -n -t . -k2,2 -k3,3 -k4,4 -k5,5 | column -t
## get svc info in one line and sort by ip
kubectl get svc -A -o json | jq '.items[] | [.spec.clusterIP, .status.loadBalancer.ingress[0].ip // false, .metadata.name, .metadata.namespace ] | join(" ")' -r | sort -n -t . -k2,2 -k3,3 -k4,4 -k5,5 | column -t
## get headless svc
kubectl get svc -A -o json | jq '.items[] | select(.spec.clusterIP=="None") | .metadata | {namespace,name}' -c
## limits requests
kubectl get pods -o=custom-columns=NAME:spec.containers[*].name,MEMREQ:spec.containers[*].resources.requests.memory,MEMLIM:spec.containers[*].resources.limits.memory,CPUREQ:spec.containers[*].resources.requests.cpu,CPULIM:spec.containers[*].resources.limits.cpu
## exec to some shell
kubectl exec -i -t -n default pt-test-pod -c test-pod "--" sh -c "clear; (bash || ash || sh)"
## remove all pod in deployment in loop
while true; do kubectl -n d8-ingress-nginx get pods -l name=main,app=controller -o json | jq .items[].metadata.name -r | xargs -I {} kubectl -n d8-ingress-nginx delete pod "{}"; sleep 1; done
## get all pods started within the last 10 minutes
kubectl get pods --sort-by='.metadata.creationTimestamp' -A -o json | jq '.items[] | select((.status.phase == "Running") and (.status.startTime | fromdateiso8601 > now-60*10)) | {name: .metadata.name, ns: .metadata.namespace, start: .status.startTime}' -c
## remove all failed pods (Completed|Error|...)
kubectl delete pod -A --field-selector=status.phase==Failed
kubectl get pods --field-selector 'status.phase=Failed' --all-namespaces | awk '{if ($4 != "Running") system ("kubectl -n " $1 " delete pods " $2 )}'
# remove pods (spot node problem)
# https://github.com/tyriis/i-see-dead-pods/tree/main
kubectl get pods \
--all-namespaces \
-o go-template \
--template='{{range .items}}{{printf "%s %s %s\n" .metadata.namespace .metadata.name .status.message}}{{end}}' \
| grep "Pod was terminated in response to imminent node shutdown." \
| awk '{print $1, $2}' \
| xargs -n2 kubectl delete pod -n || true
# evict just one pod
kubectl drain $(kubectl -n console-review get pods -l vm.kubevirt.io/name=linux-vm-001 -o json | jq .items[0].spec.nodeName -r) --pod-selector=vm.kubevirt.io/name=linux-vm-001 --delete-emptydir-data
# nodes ================================================================================================================
## get masters annotations
kubectl get nodes -o json | jq '[.items[] | select(.metadata.labels."node-role.kubernetes.io/master"!=null)] | .[].metadata.annotations'
## get master nodes external ip addresses
kubectl get nodes -o json | jq '.items[] | select(.metadata.labels."node-role.kubernetes.io/master"!=null) | .status.addresses | .[] | select (.type=="ExternalIP") | .address' -r
## get worker nodes external ip addresses
kubectl get nodes -o json | jq '.items[] | select(.metadata.labels."node-role.kubernetes.io/master"==null) | .status.addresses | .[] | select (.type=="ExternalIP") | .address' -r
## get nodes info
kubectl get nodes -o json | jq -rc '.items[].status.addresses | reduce .[] as $node ({}; . + { "\($node.type)": "\($node.address)" })'
## get pods by nodes
kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=<node>
## get pod name by node
kubectl -n d8-cloud-instance-manager get pods -l app=fencing-agent --field-selector spec.nodeName=virtlab-pt-1 -o jsonpath="{.items[0].metadata.name}"
## sort pods by node
kubectl get pods -o wide --sort-by="{.spec.nodeName}"
## get node topology labels
kubectl --context dev1 get nodes -o json | jq '.items[].metadata.labels | with_entries( select(.key|contains("topology")))'
# logs =================================================================================================================
## get dh logs without unneeded fields
kubectl -n d8-system logs --tail=100 -f deployments/deckhouse | jq -c '. | del(."task.id",."event.id")'
## get dh logs and exclude info level
kubectl -n d8-system logs --tail=100 -f deployments/deckhouse | jq -c '. | del(."task.id",."event.id") | select(.level|test("info")|not)'
# pvs
## remove finalizers from released PVs
kubectl get pv -o json | jq '.items[] | select(.status.phase=="Released") | .metadata.name' -r | xargs -I {} kubectl patch pv {} --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]'
# events
kubectl get events --field-selector involvedObject.kind=Pod
kubectl get events --field-selector type!=Normal
## cilium | hubble ======================================================================================================
# remove all cilium crds
kubectl get crd -o json | jq '.items[].metadata | select(.name | test("cilium.io")) | .name' -r | xargs -I {} kubectl delete crd "{}"
## enable hubble port-forward
kubectl port-forward -n d8-cni-cilium svc/hubble-relay 4245:443 &
## hubble observe
hubble observe --tls --tls-allow-insecure -f --verdict DROPPED -o jsonpb | jq
# etcd =================================================================================================================
## etcd get members
kubectl -n kube-system exec -ti $(kubectl -n kube-system get pod -l component=etcd,tier=control-plane -o name | head -n1) -- sh -c \
"ETCDCTL_API=3 etcdctl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/ca.crt --key /etc/kubernetes/pki/etcd/ca.key --endpoints https://127.0.0.1:2379/ member list -w table"
## with all members
kubectl -n kube-system exec -ti $(kubectl -n kube-system get pod -l component=etcd,tier=control-plane -o name | head -n1) -- sh -c "ETCDCTL_API=3 etcdctl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/ca.crt --key /etc/kubernetes/pki/etcd/ca.key --endpoints $(kubectl -n kube-system get pod -l component=etcd,tier=control-plane -o json | jq '[.items[].status | "https://" + .podIP + ":2379" ] | join(",")' -r) member list -w table"
## etcd performance
etcdctl ... check perf
## etcd check ep (get ep from member list)
etcdctl ... endpoint status -w table
## etcd get health
etcdctl ... endpoint --cluster health
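## the "..." placeholders above stand for the same TLS flags as in the member-list example, e.g.:
# ETCDCTL_API=3 etcdctl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/ca.crt --key /etc/kubernetes/pki/etcd/ca.key --endpoints https://127.0.0.1:2379/ endpoint status -w table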
# istio ================================================================================================================
## get all istio driven pods
kubectl get pods -A -o json | jq '.items[].metadata | select(.annotations."sidecar.istio.io/status") | { namespace, name }' -c
kubectl get pods -A -o json | jq '.items[].metadata | select(.annotations."sidecar.istio.io/status") | { namespace, name, rev: .annotations."sidecar.istio.io/status" }' -c
## get all istio pods and show name, namespace and istio revision
kubectl get pods -A -o json | jq -r '.items[].metadata | select(.annotations."sidecar.istio.io/status") | {name, namespace, rev: .annotations."sidecar.istio.io/status" | fromjson | .revision }'
## count istio resources
for res in $(kubectl api-resources --namespaced --output name | grep istio); do echo "${res}"; kubectl get "${res}" -A 2>/dev/null | wc -l; done
## get istio config
istioctl pc all -n prober deploy/prober --context dev0 -o yaml
# deckhouse ============================================================================================================
## queue len
kubectl -n d8-system exec -ti deploy/deckhouse -- deckhouse-controller queue list | head -n 50
# cluster config
kubectl -n d8-system exec -it deploy/deckhouse -- deckhouse-controller edit provider-cluster-configuration
# converge in cluster
kubectl -n d8-system exec -it deploy/terraform-auto-converger -c converger -- sh
dhctl terraform check
# ingress ==============================================================================================================
for i in $(kubectl get -A ing -o json | jq '.items[].spec.rules[].host' -r); do echo -n "$i - "; curl -sq https://${i} -IXGET | head -n1 ; done | uniq
# curl =================================================================================================================
# https://curl.se/docs/manpage.html
curl -w "%{local_ip}:%{local_port} -> %{remote_ip}:%{remote_port} = %{response_code}" --silent --fail --show-error --connect-timeout 5 --output /dev/null http://one.one.one.one:80
# 192.168.88.18:48452 -> 1.1.1.1:80 = 200
# the %{certs} write-out below requires curl 7.88.0 (released February 15th, 2023)
# https://daniel.haxx.se/blog/2022/12/28/curl-w-certs/
curl https://curl.se -w "%{certs}" -o /dev/null > cacert.pem
curl --cacert cacert.pem https://curl.se/
curl -w "%{json}" --silent --fail --show-error --connect-timeout 5 --output /dev/null https://echo.lv426.space
# {
# "content_type": "application/json; charset=utf-8",
# "errormsg": null,
# "exitcode": 0,
# "filename_effective": "/dev/null",
# "ftp_entry_path": null,
# "http_code": 200,
# "http_connect": 0,
# "http_version": "2",
# "local_ip": "192.168.88.18",
# "local_port": 44678,
# "method": "GET",
# "num_connects": 1,
# "num_headers": 4,
# "num_redirects": 0,
# "proxy_ssl_verify_result": 0,
# "redirect_url": null,
# "referer": null,
# "remote_ip": "194.61.2.142",
# "remote_port": 443,
# "response_code": 200,
# "scheme": "HTTPS",
# "size_download": 463,
# "size_header": 135,
# "size_request": 78,
# "size_upload": 0,
# "speed_download": 1980,
# "speed_upload": 0,
# "ssl_verify_result": 0,
# "time_appconnect": 0.223067,
# "time_connect": 0.111643,
# "time_namelookup": 0.10566,
# "time_pretransfer": 0.223163,
# "time_redirect": 0,
# "time_starttransfer": 0.233757,
# "time_total": 0.233818,
# "url": "https://echo.lv426.space",
# "url_effective": "https://echo.lv426.space/",
# "urlnum": 0,
# "curl_version": "libcurl/7.81.0 OpenSSL/3.0.2 zlib/1.2.11 brotli/1.0.9 zstd/1.4.8 libidn2/2.3.2 libpsl/0.21.0 (+libidn2/2.3.2) libssh/0.9.6/openssl/zlib nghttp2/1.43.0 librtmp/2.3 OpenLDAP/2.5.13"
# }
# linstor and kubevirt =================================================================================================
# show all labels where key name doesn't contain "kubevirt"
kubectl get nodes virtlab-pt-0 -o json | jq '.metadata.labels | to_entries | map(select(.key | test("kubevirt") | not )) | from_entries'
# same
kubectl get nodes virtlab-pt-0 -o json | jq '.metadata.labels | with_entries( select(.key | test("kubevirt") | not ) )'

# get urls for vmexport

kubectl get -n vms vmexport vm-vm1 -o json | jq '.status.links.internal | {manifests: [.manifests[].url ], volumes: [ .volumes[] | .formats[].url ]}'

# troubleshoot VMs

kubectl patch --type merge -n d8-virtualization kubevirts.kubevirt.io kubevirt -p '{"spec":{"configuration":{"developerConfiguration":{"logVerbosity":{"virtLauncher":8}}}}}'
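# revert the verbosity (with a JSON merge patch, setting the key to null removes it):
kubectl patch --type merge -n d8-virtualization kubevirts.kubevirt.io kubevirt -p '{"spec":{"configuration":{"developerConfiguration":{"logVerbosity":{"virtLauncher":null}}}}}'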

tail -f /var/log/kube-audit/audit.log | grep vms | jq '{objName: .objectRef.name, objRes: .objectRef.resource, requestURI, verb, user: .user.username, userAgent, resp: .responseStatus.code, stage}' -c

stern -n kube-system -l component=kube-controller-manager
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: np
resources:
  - ../echo/
  - ./iic.yaml
  - ./gw.yaml
  - ./vs.yaml
generatorOptions:
  disableNameSuffixHash: true
secretGenerator:
  - name: webcert
    files:
      - tls.key=_wildcard.dev1.example.com.pem
      - tls.crt=_wildcard.dev1.example.com.pem
    type: "kubernetes.io/tls"
  - name: audit-policy
    namespace: kube-system
    files:
      - audit-policy.yaml=cfg/audit-policy.yaml
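# usage sketch: render the manifests without applying, or apply directly
kubectl kustomize .
kubectl apply -k .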
# Get the list of all CRDs
crd_names=$(kubectl get crd -o=jsonpath='{.items[*].metadata.name}')
# Apply the patch to all CRDs
for crd_name in $(kubectl get crd -o=jsonpath='{.items[*].metadata.name}'); do
  kubectl patch crd "$crd_name" -p '{"spec": {"conversion": null}}'
done
# Get all CRDs with a conversion webhook
kubectl get crd -o json | jq '.items[] | select(.spec.conversion.webhook!=null) | .metadata.name'
[Unit]
Description=Simple service
# start after docker (systemd does not allow inline comments after values)
After=docker.service
Requires=docker.service
# restart the service when docker is restarted
PartOf=docker.service

[Service]
Type=oneshot
# ExecStart does not support shell pipes directly, so wrap the command in sh -c
ExecStart=/bin/sh -c '/opt/service.sh 2>&1 | logger'
ExecStop=/bin/sh -c '/opt/service.sh off 2>&1 | logger'
RemainAfterExit=true
StandardOutput=journal

[Install]
WantedBy=multi-user.target
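## usage sketch (assuming the unit is saved as /etc/systemd/system/simple.service, a hypothetical name):
systemctl daemon-reload && systemctl enable --now simple.service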
## Virtualization on OpenStack
kubectl -n d8-cni-cilium exec ds/agent -- cilium status
kubectl -n d8-cni-cilium exec ds/agent -- cilium-health status
stern -n kube-system -l component=kube-apiserver --include="timeout"
kubectl -n d8-system exec -it deploy/deckhouse -c deckhouse -- deckhouse-controller queue list
## ModuleHookRun:/modules/user-authn/generate_crowd_basic_auth_proxy_cert:schedule:150-user-authn/hooks/generate_crowd_basic_auth_proxy_cert.go:generate_crowd_basic_auth_proxy_cert:Schedule:failures 493:module hook '150-user-authn/hooks/generate_crowd_basic_auth_proxy_cert.go' failed: job timeout
kubectl -n d8-system get pods -l controller-uid -o json -w | jq .status.containerStatuses
./stern -n d8-system . --tail=0 -l controller-uid
## ephemeral containers
## https://habr.com/ru/companies/timeweb/articles/720510/
export POD_NAME=grafana-7f9b89bffc-jm97z
kubectl -n d8-monitoring debug -it --attach=false -c debugger --image ubuntu ${POD_NAME}
kubectl -n d8-monitoring get pods ${POD_NAME} -ojsonpath='{.spec.ephemeralContainers}'
kubectl -n d8-monitoring get pods ${POD_NAME} -ojsonpath='{.status.ephemeralContainerStatuses}'
kubectl -n d8-monitoring patch pod ${POD_NAME} --patch '
spec:
  shareProcessNamespace: true'
kubectl -n d8-monitoring attach -it -c debugger ${POD_NAME}
## kubevirt
stern -n vms . --include "socket" -o json | jq .message -r | jq .msg -r
cat /run/libvirt/qemu/vms_vm-linux-101.xml