Delete any orphaned CNS volumes found in vCenter without a corresponding Kubernetes PV
#!/bin/bash
# Delete any orphaned CNS volumes found in vCenter without a corresponding Kubernetes PV.
#
# Usage:
#   ./remove-orphaned-cns-volumes.sh
#   KUBECONFIG=whatever.kubeconfig FORCE=true ./remove-orphaned-cns-volumes.sh

set -euo pipefail

delete_volume() {
    echo "Deleting volume: $1"
    govc volume.rm "$1"
}

temp_dir=$(mktemp -d)

govc volume.ls | sort > "$temp_dir/vcenter.txt"
# Retrieve the PV list after the CNS volume list to avoid a race condition: new volumes may appear in this list, but no new volumes should be accidentally deleted.
kubectl get pv -o jsonpath='{range .items[*]}{.spec.csi.volumeHandle}{"\t"}{.metadata.name}{"\n"}{end}' | sort > "$temp_dir/kubernetes.txt"

orphaned=$(comm -23 "$temp_dir/vcenter.txt" "$temp_dir/kubernetes.txt")
rm -rf "$temp_dir"

if [ -z "$orphaned" ]
then
    echo "No orphaned CNS volumes found"
    exit
fi

echo "Found potentially orphaned CNS volumes:"
echo "$orphaned"

if [ -z "${FORCE-}" ]
then
    read -p "Delete CNS volumes? [y/N]" -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]
    then
        exit 1
    fi
fi
# Some careful word splitting: split the loop on lines, then extract the first tab-delimited field.
IFS=$'\n'
for volume in $orphaned
do
    IFS=$'\t'
    # shellcheck disable=SC2086
    delete_volume $volume
done
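The orphan detection hinges on comm -23, which prints only the lines unique to the first sorted input, suppressing lines unique to the second input and lines common to both. A minimal standalone sketch of that step, using made-up volume IDs and PV names rather than real govc or kubectl output:

printf 'vol-aaa\tpvc-111\nvol-bbb\tpvc-222\n' | sort > vcenter.txt
printf 'vol-aaa\tpvc-111\n' | sort > kubernetes.txt
# Prints only "vol-bbb<TAB>pvc-222": present in vCenter, unknown to Kubernetes.
comm -23 vcenter.txt kubernetes.txt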
I personally don't end up using the contexts feature too often, because my clusters share the same admin username and contexts don't work then. For example, when deploying multiple identical test clusters and then tearing them down. Instead I specify the KUBECONFIG environment variable per invocation, as in the usage example above.
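For example, running the script once per test cluster (the kubeconfig filenames here are placeholders):

KUBECONFIG=test-cluster-1.kubeconfig ./remove-orphaned-cns-volumes.sh
KUBECONFIG=test-cluster-2.kubeconfig FORCE=true ./remove-orphaned-cns-volumes.sh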
hi @bburky,
thank you very much for your script, it saved me a lot of time.
May I suggest a modification that takes into account the possibility of several Kubernetes clusters hosted on the same vSphere? The merged list may also include Kubernetes clusters that are not on vSphere, but the probability of them having the same PV names is zero.
Thanks again
Farid
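A minimal sketch of that modification, assuming each cluster's kubeconfig path is passed as a script argument (the argument handling is illustrative, not part of the original script): replace the single kubectl call with a loop that concatenates the PV lists of every cluster before sorting, so a volume is only considered orphaned if no cluster claims it.

# Build the Kubernetes PV list from every cluster before diffing against vCenter.
# Each "$@" entry is a kubeconfig path, e.g.:
#   ./remove-orphaned-cns-volumes.sh cluster-a.kubeconfig cluster-b.kubeconfig
for kubeconfig in "$@"
do
    kubectl --kubeconfig "$kubeconfig" get pv -o jsonpath='{range .items[*]}{.spec.csi.volumeHandle}{"\t"}{.metadata.name}{"\n"}{end}'
done | sort > "$temp_dir/kubernetes.txt"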