-
-
Save Madhu-1/2f5db960884671942540f06c599e50c2 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
# BUG FIX: options in the shebang ("#!/bin/bash -e") are lost when the script
# is invoked as "bash script.sh"; enable errexit with 'set' so it always holds.
set -e

# Storage providers and the default storage class is not needed for Ceph-CSI
# testing. In order to reduce resources and potential conflicts between storage
# plugins, disable them.
# Disable minikube's built-in storage addons; failures are ignored on purpose
# (the addon may already be disabled, or minikube may not be running yet).
function disable_storage_addons() {
    # BUG FIX: the original ran "${minikube} addons ...", but no "minikube"
    # variable is ever assigned in this script (every other call site uses the
    # plain "minikube" command). The empty expansion made the command fail and
    # "|| true" silently swallowed it, so the addons were never disabled.
    minikube addons disable default-storageclass 2>/dev/null || true
    minikube addons disable storage-provisioner 2>/dev/null || true
}
# Poll "minikube ssh" until the VM accepts connections: up to 100 attempts
# with a 0.1s pause between them (~10s total). Exits the script on timeout.
function wait_for_ssh() {
    local attempt
    for ((attempt = 0; attempt < 100; attempt++)); do
        if minikube ssh echo connected &>/dev/null; then
            return 0
        fi
        sleep 0.1
    done
    echo ERROR: ssh did not come up >&2
    exit 1
}
# minikube ships the Busybox losetup, which does not work with raw-block PVCs.
# Copy the host losetup executable into the VM and hope it works.
#
# See https://github.com/kubernetes/minikube/issues/8284
function minikube_losetup() {
    # scp must not prompt for host-key confirmation
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
        -i "$(minikube ssh-key)" /sbin/losetup docker@"$(minikube ip)":losetup
    # swap the /sbin/losetup symlink for the copied executable
    minikube ssh 'sudo sh -c "rm -f /sbin/losetup && cp ~docker/losetup /sbin"'
}
# Rook deployment configuration (overridable via the environment)
ROOK_VERSION=${ROOK_VERSION:-v1.4.5}
ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300}
ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph"
ROOK_CEPH_CLUSTER_VERSION="v14.2.10"
KUBECTL_RETRY=5
KUBECTL_RETRY_DELAY=10

# configure minikube (overridable via the environment)
KUBE_VERSION=${KUBE_VERSION:-v1.19.2}
CONTAINER_CMD=${CONTAINER_CMD:-docker}
MEMORY=${MEMORY:-4096}
CPUS=${CPUS:-2}
VM_DRIVER=${VM_DRIVER:-kvm2}

# the libvirt (kvm2) driver exposes the data disk as vda1 instead of sda1
case "${VM_DRIVER}" in
kvm2) DISK="vda1" ;;
*) DISK="sda1" ;;
esac

# feature-gates for kube
K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true"}
# kubectl_retry <action> [args...]
#
# Run "kubectl <action> args..." and retry up to KUBECTL_RETRY times, sleeping
# KUBECTL_RETRY_DELAY seconds between attempts. For "create", failures whose
# stderr consists only of "AlreadyExists" lines are treated as success, so
# re-running the script over existing resources stays idempotent.
# The captured stdout/stderr of the last attempt is replayed for the caller.
# Returns 0 on success, non-zero after exhausting all retries.
kubectl_retry() {
    local retries=0 action="${1}" ret=0 stdout stderr
    shift
    # temporary files for kubectl output
    # NOTE(review): mktemp is given a bare template, so these files land in
    # the current working directory rather than $TMPDIR — presumably intended.
    stdout=$(mktemp rook-kubectl-stdout.XXXXXXXX)
    stderr=$(mktemp rook-kubectl-stderr.XXXXXXXX)
    while ! kubectl "${action}" "${@}" 2>"${stderr}" 1>"${stdout}"; do
        # in case of a failure when running "create", ignore errors with "AlreadyExists"
        if [ "${action}" == 'create' ]; then
            # count lines in stderr that do not have "AlreadyExists"
            ret=$(grep -cvw 'AlreadyExists' "${stderr}")
            if [ "${ret}" -eq 0 ]; then
                # Success! stderr is empty after removing all "AlreadyExists" lines.
                break
            fi
        fi
        retries=$((retries + 1))
        if [ ${retries} -eq ${KUBECTL_RETRY} ]; then
            # out of retries: force a failure exit status and stop
            ret=1
            break
        fi
        sleep ${KUBECTL_RETRY_DELAY}
    done
    # write output so that calling functions can consume it
    cat "${stdout}" >/dev/stdout
    cat "${stderr}" >/dev/stderr
    rm -f "${stdout}" "${stderr}"
    return ${ret}
}
# rook_version <field>
# Print the <field>'th dot-separated component of ROOK_VERSION with the
# leading "v" stripped (1 = major, 2 = minor, 3 = patch).
function rook_version() {
    local stripped="${ROOK_VERSION#v}"
    printf '%s\n' "${stripped}" | cut -d'.' -f"${1}"
}
# Deploy a Rook/Ceph test cluster: common resources, operator, CephCluster,
# toolbox, CephFilesystem and CephBlockPool, then wait for every component
# that was actually created to become healthy.
function deploy_rook() {
    kubectl_retry create -f "${ROOK_URL}/common.yaml"
    kubectl_retry create -f "${ROOK_URL}/operator.yaml"
    # find out the rook version to decide on the ceph cluster image to be used
    ROOK_MAJOR=$(rook_version 1)
    ROOK_MINOR=$(rook_version 2)
    if { [ "${ROOK_MAJOR}" -eq 1 ] && [ "${ROOK_MINOR}" -le 2 ]; }; then
        # Rook <= v1.2: pin the Ceph image in cluster-test.yaml explicitly.
        ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH="image: ceph/ceph:${ROOK_CEPH_CLUSTER_VERSION}"
        # upgrade ceph cluster version to 14.2.10 to support CephFS snapshot functionalities.
        TEMP_DIR="$(mktemp -d)"
        curl -o "${TEMP_DIR}"/cluster-test.yaml "${ROOK_URL}/cluster-test.yaml"
        sed -i "s|image.*|${ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH}|g" "${TEMP_DIR}"/cluster-test.yaml
        # print the patched manifest for debugging before applying it
        cat "${TEMP_DIR}"/cluster-test.yaml
        kubectl_retry create -f "${TEMP_DIR}/cluster-test.yaml"
        rm -rf "${TEMP_DIR}"
    else
        # add "mon_warn_on_pool_no_redundancy = false" to ceph.conf if missing
        # see https://github.com/rook/rook/pull/5925 for upstream status
        TEMP_DIR="$(mktemp -d)"
        curl -o "${TEMP_DIR}"/cluster-test.yaml "${ROOK_URL}/cluster-test.yaml"
        if ! grep -q mon_warn_on_pool_no_redundancy "${TEMP_DIR}"/cluster-test.yaml; then
            sed -i '/osd_pool_default_size =/a \ mon_warn_on_pool_no_redundancy = false' "${TEMP_DIR}"/cluster-test.yaml
        fi
        kubectl_retry create -f "${TEMP_DIR}/cluster-test.yaml"
        rm -rf "${TEMP_DIR}"
    fi
    kubectl_retry create -f "${ROOK_URL}/toolbox.yaml"
    kubectl_retry create -f "${ROOK_URL}/filesystem-test.yaml"
    kubectl_retry create -f "${ROOK_URL}/pool-test.yaml"
    # Check if CephCluster is empty
    if ! kubectl_retry -n rook-ceph get cephclusters -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_ceph_cluster_health
    fi
    # Check if CephFileSystem is empty
    if ! kubectl_retry -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_mds_stat
    fi
    # Check if CephBlockPool is empty
    if ! kubectl_retry -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_rbd_stat ""
    fi
}
# Poll the CephCluster resource until it reports state "Created" with
# health "HEALTH_OK", checking every 5 seconds for up to ROOK_DEPLOY_TIMEOUT
# seconds. Exits the script on timeout.
function check_ceph_cluster_health() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        echo "Wait for rook deploy... ${retry}s" && sleep 5
        CEPH_STATE=$(kubectl_retry -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.state}')
        CEPH_HEALTH=$(kubectl_retry -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.ceph.health}')
        echo "Checking CEPH cluster state: [$CEPH_STATE]"
        if [[ "$CEPH_STATE" = "Created" && "$CEPH_HEALTH" = "HEALTH_OK" ]]; then
            echo "Creating CEPH cluster is done. [$CEPH_HEALTH]"
            break
        fi
    done
    # the counter only overshoots the timeout when the loop never broke out
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] CEPH cluster not in a healthy state (timeout)"
        exit 1
    fi
    echo ""
}
# Poll the CephFilesystem until its MDS daemons are active and at least one
# filesystem pod is Running, checking every 5 seconds for up to
# ROOK_DEPLOY_TIMEOUT seconds. Exits the script on timeout.
function check_mds_stat() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        FS_NAME=$(kubectl_retry -n rook-ceph get cephfilesystems.ceph.rook.io -ojsonpath='{.items[0].metadata.name}')
        echo "Checking MDS ($FS_NAME) stats... ${retry}s" && sleep 5
        # CONSISTENCY FIX: use the discovered filesystem name instead of the
        # hard-coded "myfs" (identical in practice for filesystem-test.yaml,
        # but the hard-coded name broke if the test filesystem was renamed).
        ACTIVE_COUNT=$(kubectl_retry -n rook-ceph get cephfilesystems "${FS_NAME}" -ojsonpath='{.spec.metadataServer.activeCount}')
        ACTIVE_COUNT_NUM=$((ACTIVE_COUNT + 0))
        echo "MDS ($FS_NAME) active_count: [$ACTIVE_COUNT_NUM]"
        if ((ACTIVE_COUNT_NUM < 1)); then
            continue
        fi
        # same fix for the pod label selector: match the discovered name
        if kubectl_retry -n rook-ceph get pod -l rook_file_system="${FS_NAME}" | grep Running &>/dev/null; then
            echo "Filesystem ($FS_NAME) is successfully created..."
            break
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] Failed to get ceph filesystem pods"
        exit 1
    fi
    echo ""
}
# Poll until "rbd pool stats" succeeds inside the toolbox pod for the given
# block pool, checking every 5 seconds for up to ROOK_DEPLOY_TIMEOUT seconds.
# $1 - pool name; when empty, the first CephBlockPool found is used.
# Exits the script on timeout.
function check_rbd_stat() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        if [ -z "$1" ]; then
            RBD_POOL_NAME=$(kubectl_retry -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
        else
            RBD_POOL_NAME=$1
        fi
        echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5
        TOOLBOX_POD=$(kubectl_retry -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
        TOOLBOX_POD_STATUS=$(kubectl_retry -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
        if [[ "$TOOLBOX_POD_STATUS" != "Running" ]]; then
            echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"
            continue
        fi
        if kubectl_retry exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME" &>/dev/null; then
            echo "RBD ($RBD_POOL_NAME) is successfully created..."
            break
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] Failed to get RBD pool stats"
        exit 1
    fi
    echo ""
}
# Main command dispatcher: the first argument selects the action; anything
# else prints the usage text to stderr.
case "${1:-}" in
up)
    disable_storage_addons
    echo "starting minikube with kubeadm bootstrapper"
    # shellcheck disable=SC2086
    minikube start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}"
    # create a link so the default dataDirHostPath will work for this
    # environment
    if [[ "${VM_DRIVER}" != "none" ]]; then
        wait_for_ssh
        # shellcheck disable=SC2086
        minikube ssh "sudo mkdir -p /mnt/${DISK}/var/lib/rook;sudo ln -s /mnt/${DISK}/var/lib/rook /var/lib/rook"
        minikube_losetup
    fi
    # Attach an extra raw 50G disk to the minikube VM (for the Ceph OSD),
    # reboot the domain so the new device appears, then start minikube a
    # second time on purpose: the reboot stopped the cluster.
    # NOTE(review): the libvirt paths assume VM_DRIVER=kvm2 — other drivers
    # would likely fail here; confirm before using a different driver.
    sudo -S qemu-img create -f raw /var/lib/libvirt/images/minikube-box2-vm-disk1-50G 50G
    virsh -c qemu:///system attach-disk minikube --source /var/lib/libvirt/images/minikube-box2-vm-disk1-50G --target vdb --cache none
    virsh -c qemu:///system reboot --domain minikube
    minikube start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}"
    minikube kubectl -- cluster-info
    ;;
delete)
    minikube delete
    ;;
ssh)
    echo "connecting to minikube"
    minikube ssh
    ;;
deploy-rook)
    echo "deploy rook"
    deploy_rook
    ;;
*)
    echo " $0 [command]
Available Commands:
up Starts a local kubernetes cluster and prepare disk for rook
delete Delete a running local kubernetes cluster
ssh Log into or run a command on a minikube machine with SSH
deploy-rook Deploy rook to minikube
" >&2
    ;;
esac
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment