Skip to content

Instantly share code, notes, and snippets.

@fl64
Last active June 20, 2024 06:11
Show Gist options
  • Save fl64/494dcedeb722abbdb1363f2862dba9c1 to your computer and use it in GitHub Desktop.
Save fl64/494dcedeb722abbdb1363f2862dba9c1 to your computer and use it in GitHub Desktop.
remove dvp
#!/usr/bin/env bash
### ITS DRAFT!!!
# Force-removes DVP (Deckhouse Virtualization Platform) objects: every instance
# of the kubevirt.io / cdi.kubevirt.io / *.virtualization.deckhouse.io API
# groups, then leftover PVCs, PVs and PDBs further down this script.
# Handy discovery commands for the API kinds involved:
# kubectl api-resources --api-group="kubevirt.io" -o name
# kubectl api-resources --api-group="cdi.kubevirt.io" -o name
# Terminal color sequences for status output.
# NOTE(review): tput fails when $TERM is unset (cron, CI) — the variables then
# stay empty, which merely disables coloring.
green=$(tput setaf 2)
red=$(tput setaf 1)
yellow=$(tput setaf 3)
sgr0=$(tput sgr0)
# Force-delete every instance of each API kind listed (whitespace-separated)
# in $1. For each kind: list all objects cluster-wide as JSON, then for each
# object force-delete it and strip its finalizers so deletion cannot hang.
# CustomResourceDefinition is skipped so the CRDs themselves survive this pass.
# Globals: red, sgr0 (colors). Arguments: $1 - list of API kinds.
function remove_resources() {
  local api resources resource ns name
  for api in ${1}; do  # intentional word splitting: $1 is a list of kinds
    echo "${api}"
    if [[ ${api} == "CustomResourceDefinition" ]]; then continue; fi
    resources=$(kubectl get "${api}" -A -o=json 2>/dev/null | jq -c '.items[].metadata | {namespace, name}')
    if [[ -z ${resources} ]]; then
      continue
    fi
    while read -r resource; do
      ns=$(jq -r '.namespace' <<< "${resource}")
      name=$(jq -r '.name' <<< "${resource}")
      # jq prints "null" (never "") for a missing field, so an empty or "null"
      # name means a malformed record — skip it. (The original `||` guard was
      # always true for that reason.)
      if [[ -n ${name} && ${name} != "null" ]]; then
        # Delete first (sets deletionTimestamp), then drop finalizers so the
        # object can actually go away. Both branches now use the same order.
        if [[ ${ns} != "null" ]]; then
          echo "name: ${red}${name}${sgr0}, ns: ${red}${ns}${sgr0}"
          kubectl -n "${ns}" delete "${api}" "${name}" --force --grace-period=0 --timeout=0 #2>/dev/null
          kubectl -n "${ns}" patch "${api}" "${name}" --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]' 2>/dev/null
        else
          echo "name: ${red}${name}${sgr0}"
          kubectl delete "${api}" "${name}" --force --grace-period=0 --timeout=0 #2>/dev/null
          kubectl patch "${api}" "${name}" --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]' 2>/dev/null
        fi
      fi
    done <<< "${resources}"
  done
}
# try to remove all DVP resources, dependents first (reverse api-resources order)
# BUGFIX: the dot before "virtualization" was unescaped and matched any char.
dvp_resources=$(kubectl api-resources -o name | grep -E "^[a-z]+\.virtualization\.deckhouse\.io$")
dvp_resources_reverse=$(tac <<< "${dvp_resources}")
remove_resources "${dvp_resources_reverse}"
# try to remove all CORE resources
core_resources=$(kubectl api-resources -o name | grep -E '(cdi|dvpinternal|kubevirt)')
remove_resources "${core_resources}"
# pvc — PVCs created by CDI importers: strip finalizers, then force-delete.
# BUGFIX: the original fed the loop via an unquoted here-string `<<< $(...)`,
# which collapses all newlines into spaces, so `read name ns` mis-parsed every
# record. Process substitution keeps one record per line.
while read -r pvc_name pvc_ns; do
  [[ -z ${pvc_name} ]] && continue
  kubectl -n "${pvc_ns}" patch pvc "${pvc_name}" --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]' 2>/dev/null
  kubectl -n "${pvc_ns}" delete pvc "${pvc_name}" --force --grace-period=0 --timeout=5 2>/dev/null
done < <(kubectl get pvc -A -l app=containerized-data-importer -o json | jq '.items[].metadata | .name+" "+.namespace' -r)
# pv — PVs still guarded by the DVP pv-protection finalizer.
# BUGFIX: the original unquoted here-string `<<< $(...)` flattened all PV
# names onto one line, so a single mangled name reached kubectl. Process
# substitution preserves one name per line.
while read -r pv_name; do
  [[ -z ${pv_name} ]] && continue
  echo "${pv_name}"
  kubectl patch pv "${pv_name}" --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]' #2>/dev/null
  kubectl delete pv "${pv_name}" --force --grace-period=0 #--timeout=5 #2>/dev/null
done < <(kubectl get pv -A -o json | jq '.items[].metadata | select(.finalizers[]=="virtualization.deckhouse.io/pv-protection") | .name' -r)
# pdb — PodDisruptionBudgets owned by VirtualMachineInstance objects.
# BUGFIX: the original patched/deleted `pvc` here although the jq query lists
# PDBs — it would have acted on PVCs that happen to share a PDB's name.
# BUGFIX: jq gives `!=` higher precedence than `//`, so the original
# `.ownerReferences // [] != []` parsed as `.ownerReferences // false`;
# parenthesized to express the intended "has at least one ownerReference".
# Also uses process substitution instead of the newline-collapsing `<<< $(...)`.
while read -r pdb_name pdb_ns; do
  [[ -z ${pdb_name} ]] && continue
  kubectl -n "${pdb_ns}" patch pdb "${pdb_name}" --type=json -p '[{ "op": "remove", "path": "/metadata/finalizers" }]' 2>/dev/null
  kubectl -n "${pdb_ns}" delete pdb "${pdb_name}" --force --grace-period=0 --timeout=5 2>/dev/null
done < <(kubectl get -A pdb -o json | jq '.items[].metadata | select((.ownerReferences // []) != []) | select(.ownerReferences[].kind == "VirtualMachineInstance") | .name+" "+.namespace' -r)
#!/usr/bin/env bash
# This script removes almost every resource created by virtualization module,
# cdi-operator and virt-operator.
#
# The idea is to not rely on working operators (cdi-operator and virt-operator).
# This script stops operators and Deckhouse and delete all known resources explicitly.
# Also, it removes Helm releases and waits until d8-virtualization namespace is gone.
# Then it starts Deckhouse and waits until deploy/deckhouse is ready.
#
# The virtualization module is disabled at the end, so it is your responsibility to enable it again.
# Terminal styling sequences used by the *_echo helpers below.
# NOTE(review): tput fails when $TERM is unset (cron, CI); the variables stay
# empty in that case, which only disables coloring.
red=$(tput setaf 1)
green=$(tput setaf 2)
gold=$(tput setaf 3)
bold=$(tput bold)
sgr0=$(tput sgr0)
# Print an error message in bold red.
# FIX: diagnostics now go to stderr so they survive stdout capture/pipes;
# "$@" inside a larger quoted word is replaced with the intended "$*".
error_echo(){
  printf '%s\n' "${red}${bold}$* ${sgr0}" >&2
}
# Print a section title in bold gold on stdout.
title_echo(){
  printf '%s\n' "${gold}${bold}$* ${sgr0}"
}
# Print a success message in bold green on stdout.
success_echo(){
  printf '%s\n' "${green}${bold}$* ${sgr0}"
}
# Print a warning in bold gold on stdout (same palette as title_echo).
warn_echo(){
  printf '%s\n' "${gold}${bold}$* ${sgr0}"
}
# Remove finalizers on resource.
# All arguments are forwarded verbatim to `kubectl patch` (flags + kind + name).
# Examples:
# __rmfin -n d8-virtualization dvpinternalkubevirts.internal.deckhouse.io kubevirt
# __rmfin clusterrole cdi-apiserver
__rmfin() {
  echo " Remove finalizers from $*"
  # FIX: quote "$@" so each argument keeps its word boundaries.
  kubectl patch --type=merge -p '{"metadata":{"finalizers":null}}' "$@"
}
# Print Deckhouse's main task queue by exec'ing `deckhouse-controller queue main`
# inside the leader pod. The pod name is taken from the holderIdentity of the
# deckhouse-leader-election Lease, truncated at the first '.'; if the Lease
# cannot be read, it falls back to "deploy/deckhouse".
__d8_queue_main() {
kubectl -n d8-system exec -ti $( (kubectl -n d8-system get leases/deckhouse-leader-election -o jsonpath={.spec.holderIdentity} 2>/dev/null || echo "deploy/deckhouse") | cut -d. -f1) -c deckhouse -- deckhouse-controller queue main
}
# Abort the uninstall if user-created DVP custom resources still exist.
# Scans every CRD labeled module=virtualization — except the module-managed
# virtualmachinecpumodels / virtualmachineipaddressleases — and exits 1 when
# any namespace still holds instances.
check_dvp_resources() {
  local crd resources hasResources=false
  title_echo "Checking DVP resources created by the user ..."
  # BUGFIX: original used `grep ... 2>&1 > /dev/null`, which leaked grep's
  # stderr to the terminal while discarding stdout; `grep -q` is the intent.
  if kubectl get crds -l module=virtualization | grep -q virtualization.deckhouse.io ; then
    for crd in $(kubectl get crds -l module=virtualization -o name | cut -d/ -f2) ; do
      # These two kinds are managed by the module itself, not by users.
      if [[ "${crd}" =~ (virtualmachinecpumodels|virtualmachineipaddressleases) ]] ; then
        continue
      fi
      # ${crd%%.*} = short kind name (parameter expansion instead of echo|cut).
      title_echo " Checking ${crd%%.*} in all namespaces ..."
      resources=$(kubectl get "${crd}" -A 2>&1)
      # "No resources found" means the kind is empty; anything else is a hit.
      if ! grep -q "No resources found" <<< "${resources}" ; then
        hasResources=true
      fi
      echo "${resources}"
    done
  fi
  if [ "${hasResources}" = true ] ; then
    error_echo "Remove custom DVP resources before uninstalling DVP!"
    exit 1
  else
    success_echo "No DVP resources left, proceed to uninstalling DVP."
  fi
}
# Turn the virtualization module off and scale every operator/controller to
# zero so nothing recreates the resources we are about to delete.
disable_virtualization_module() {
  local d
  title_echo "Disable virtualization module ..."
  kubectl patch mc virtualization --type='merge' --patch '{"spec":{"enabled":false}}'
  title_echo "Stop Deckhouse ..."
  kubectl -n d8-system scale deploy/deckhouse --replicas=0
  # Delete Helm releases of the virtualization module.
  kubectl -n d8-system delete secret -l name=virtualization
  title_echo "Disable cdi deployments ..."
  for d in cdi-operator cdi-apiserver cdi-deployment; do
    kubectl -n d8-virtualization scale "deploy/${d}" --replicas=0
  done
  title_echo "Disable kubevirt deployments and ds ..."
  for d in virt-operator virt-api virt-controller virt-exportproxy; do
    kubectl -n d8-virtualization scale "deploy/${d}" --replicas=0
  done
  kubectl -n d8-virtualization delete ds/virt-handler
  title_echo "Disable dvp deployments ..."
  for d in virtualization-controller virtualization-api dvcr; do
    kubectl -n d8-virtualization scale "deploy/${d}" --replicas=0
  done
  kubectl -n d8-virtualization delete ds/vmi-router
}
# Do not rely on cdi-operator as it may be broken.
# Deletes everything cdi-operator created: cluster-scoped objects, namespaced
# objects, CDI CRDs, and finally the CDI configuration CRs (finalizers first).
remove_cdi() {
  local kind
  # CDI keeps cluster wide resources in place. Remove them.
  title_echo " Remove cluster resources created by cdi-operator ..."
  for kind in validatingwebhookconfigurations mutatingwebhookconfigurations apiservices clusterroles clusterrolebindings; do
    kubectl delete "${kind}" -l operator.cdi.kubevirt.io/createVersion
  done
  title_echo " Remove namespaced resources created by cdi-operator ... "
  for kind in service deploy serviceaccount role rolebinding secret configmap; do
    kubectl -n d8-virtualization delete "${kind}" -l app.kubernetes.io/managed-by=cdi-operator
  done
  title_echo " Remove CRDs related to CDI ..."
  kubectl delete crds -l operator.cdi.kubevirt.io/createVersion
  title_echo " Remove CDI configuration ..."
  __rmfin dvpinternalcdis.internal.virtualization.deckhouse.io cdi
  kubectl delete dvpinternalcdis.internal.virtualization.deckhouse.io cdi
  __rmfin dvpinternalcdis.internal.virtualization.deckhouse.io config
  kubectl delete dvpinternalcdis.internal.virtualization.deckhouse.io config
  kubectl delete crd dvpinternalcdis.internal.virtualization.deckhouse.io
}
# Remove everything KubeVirt's virt-operator created, without relying on the
# operator itself: cluster-scoped objects, namespaced objects, KubeVirt CRDs,
# the KubeVirt configuration CRs (finalizers stripped first), and per-node
# labels/annotations/device capacities.
remove_kubevirt() {
title_echo " Remove cluster resources created by the virt-operator ..."
kubectl delete validatingwebhookconfigurations -l app.kubernetes.io/managed-by=virt-operator
kubectl delete mutatingwebhookconfigurations -l app.kubernetes.io/managed-by=virt-operator
kubectl delete apiservices -l app.kubernetes.io/managed-by=virt-operator
kubectl delete clusterroles -l app.kubernetes.io/managed-by=virt-operator
kubectl delete clusterrolebindings -l app.kubernetes.io/managed-by=virt-operator
title_echo " Remove namespaced resources created by the virt-operator ... "
kubectl -n d8-virtualization delete service -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete deploy -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete ds -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete serviceaccount -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete role -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete rolebinding -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete secret -l app.kubernetes.io/managed-by=virt-operator
kubectl -n d8-virtualization delete configmap -l app.kubernetes.io/managed-by=virt-operator
title_echo " Remove CRDs related to KubeVirt ..."
kubectl delete crds -l app.kubernetes.io/managed-by=virt-operator
title_echo " Remove KubeVirt configuration ..."
# `|| true` keeps the cleanup going if the CR/CRD is already gone.
__rmfin -n d8-virtualization dvpinternalkubevirts.internal.virtualization.deckhouse.io kubevirt
kubectl -n d8-virtualization delete dvpinternalkubevirts.internal.virtualization.deckhouse.io kubevirt || true
__rmfin -n d8-virtualization dvpinternalkubevirts.internal.virtualization.deckhouse.io config
kubectl -n d8-virtualization delete dvpinternalkubevirts.internal.virtualization.deckhouse.io config || true
kubectl delete crd dvpinternalkubevirts.internal.virtualization.deckhouse.io || true
title_echo " Remove KubeVirt labels from nodes ..."
for node in $(kubectl get no -o name) ; do
# Build a JSON-Patch replacing /metadata/labels with a copy from which every
# key matching "kubevirt.io" has been filtered out, then apply it via stdin.
kubectl get ${node} -o json | jq '
(.metadata.labels | to_entries | map(select(.key | test("kubevirt.io") | not)) | from_entries) as $filtered
| [{"op":"replace","path":"/metadata/labels", "value": $filtered}]
' -cM | kubectl patch ${node} --type=json --patch-file=/dev/stdin
# Remove 'kubevirt.io/heartbeat' annotation (trailing '-' means "remove").
kubectl annotate ${node} kubevirt.io/heartbeat-
# Remove kubevirt devices from node status capacity.
# "~1" is the JSON-Pointer escape for "/" in devices.kubevirt.io/<dev>.
kubectl patch ${node} --type=json -p '[{"op":"remove", "path":"/status/capacity/devices.kubevirt.io~1kvm"}]' --subresource=status
kubectl patch ${node} --type=json -p '[{"op":"remove", "path":"/status/capacity/devices.kubevirt.io~1tun"}]' --subresource=status
kubectl patch ${node} --type=json -p '[{"op":"remove", "path":"/status/capacity/devices.kubevirt.io~1vhost-net"}]' --subresource=status
done
}
# Remove the DVP layer itself: admission webhook, apiservice, RBAC, leftover
# cluster-scoped CRs, monitoring objects, CRDs, and finally the
# d8-virtualization namespace (waiting up to ~30 minutes for it to disappear).
remove_dvp() {
  local ready=false count=30 i
  title_echo "Remove DVP resources ..."
  # Note: no labels on these 2 resources. It is sad.
  kubectl delete validatingwebhookconfigurations virtualization-controller-admission-webhook
  kubectl delete apiservice v1alpha2.subresources.virtualization.deckhouse.io
  kubectl delete clusterrole -l module=virtualization
  kubectl delete clusterrolebinding -l module=virtualization
  # FIX: xargs -r skips the command entirely when the list is empty; plain
  # xargs would run `kubectl patch`/`kubectl delete` with no resource and fail.
  kubectl get virtualmachinecpumodels.virtualization.deckhouse.io -o name | xargs -r kubectl patch --type=merge -p '{"metadata":{"finalizers":null}}'
  kubectl get virtualmachinecpumodels.virtualization.deckhouse.io -o name | xargs -r kubectl delete
  kubectl get virtualmachineipaddressleases.virtualization.deckhouse.io -o name | xargs -r kubectl patch --type=merge -p '{"metadata":{"finalizers":null}}'
  kubectl get virtualmachineipaddressleases.virtualization.deckhouse.io -o name | xargs -r kubectl delete
  kubectl delete -n d8-monitoring servicemonitors -l module=virtualization
  kubectl delete PriorityClass -l module=virtualization
  # TODO ask in #dev-network if it needs to be deleted.
  # kubectl delete CiliumIdentity -l io.kubernetes.pod.namespace=d8-virtualization
  # TODO delete labels and annotations from Node and CiliumNode.
  kubectl delete crds -l module=virtualization
  title_echo "Remove d8-virtualization namespace"
  kubectl delete ns d8-virtualization --wait=false
  for i in $(seq 1 "${count}") ; do
    echo "Wait until d8-virtualization namespace is gone ${i}/${count}"
    if kubectl wait ns d8-virtualization --for delete --timeout=60s ; then
      ready=true
      break
    fi
    # Show why the namespace is stuck (e.g. pending finalizers).
    kubectl get ns d8-virtualization -o json | jq .status
  done
  if [ "${ready}" = true ] ; then
    success_echo "DVP is removed!"
  else
    error_echo "d8-virtualization namespace is stuck, check it manually and then enable Deckhouse by scaling deploy/deckhouse replicas"
    exit 1
  fi
}
# Scale Deckhouse back up and wait (up to 30 x 60s) until the deployment
# reports Available; on timeout, dump the head of its main queue and exit 1.
start_deckhouse() {
  # TODO HA installation will change replicas to 3, isn't it?
  kubectl -n d8-system scale deploy/deckhouse --replicas=1
  ready=false
  count=30
  i=1
  while [ "$i" -le "$count" ]; do
    echo "Wait until deckhouse is ready ${i}/${count}"
    if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=60s; then
      ready=true
      break
    fi
    # Peek at the queue between attempts to show progress.
    __d8_queue_main | head -n 6
    i=$((i + 1))
  done
  # Guard clause: bail out loudly if Deckhouse never became available.
  if [ "$ready" != true ]; then
    error_echo "Deckhouse is not ready after 30m, check its queue for errors:"
    __d8_queue_main | head -n25
    exit 1
  fi
  success_echo "Deckhouse is Ready!"
}
# Orchestrate the full uninstall: stop operators, wipe CDI / KubeVirt / DVP
# resources, bring Deckhouse back, then print re-enable instructions.
main(){
  local step
  #check_dvp_resources
  for step in disable_virtualization_module remove_cdi remove_kubevirt remove_dvp start_deckhouse; do
    "${step}"
  done
  echo
  warn_echo "==================[ NOTE !!! ]===================="
  warn_echo "Module virtualization is in the DISABLED state now,"
  warn_echo "use this command to install virtualization again:"
  echo
  echo "kubectl patch mc virtualization --type=merge -p '{\"spec\":{\"enabled\":true}}'"
  imageTag=$(kubectl get mpo virtualization -o jsonpath={.spec.imageTag} || echo "NO_TAG")
  if [[ "${imageTag}" != "NO_TAG" ]] ; then
    echo
    echo "Also check spec.imageTag in mpo/virtualization! It is set to '${imageTag}'."
  fi
}
main $@
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment