Skip to content

Instantly share code, notes, and snippets.

@ormergi
Last active May 17, 2022 14:19
Show Gist options
  • Save ormergi/45213e3e5b8e1a2f2c7a98e4b07eaeb3 to your computer and use it in GitHub Desktop.
#!/bin/bash
# Spin up a local kind cluster with KubeVirt installed, then (below) deploy
# multus, build the kiagnose images, and run the kubevirt-vm-latency checkup.
# Env overrides: CRI (default: podman), CLUSTER_NAME (default: test).
set -ex
CRI=${CRI:-podman}

# download the kind binary if it is not already present
kind="./kind"
if [ ! -f "$kind" ]; then
  curl -L "https://github.com/kubernetes-sigs/kind/releases/download/v0.11.1/kind-linux-amd64" -o "$kind"
  chmod +x "$kind"
fi

# create the kind cluster unless one with this exact name already exists
# (anchored grep: plain "grep $CLUSTER_NAME" would also match e.g. "test2")
CLUSTER_NAME=${CLUSTER_NAME:-test}
if ! "$kind" get clusters 2> /dev/null | grep -q "^${CLUSTER_NAME}$"; then
  "$kind" create cluster --name "${CLUSTER_NAME}"
fi

# fetch kubeconfig and use it for all subsequent kubectl calls
kubeconfig="./kubeconfig-${CLUSTER_NAME}"
"$kind" get kubeconfig --name "${CLUSTER_NAME}" > "${kubeconfig}"
export KUBECONFIG="${kubeconfig}"

# deploy KubeVirt only if it is not already available.
# NOTE: the original wrapped this in $( ... ), which would try to *execute*
# kubectl's output as a command — run the command directly instead.
if ! kubectl wait kubevirts kubevirt -n kubevirt --for condition=available --timeout 0s 2> /dev/null; then
  echo "deploying kubevirt..."
  kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/v0.53.0/kubevirt-operator.yaml
  kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/v0.53.0/kubevirt-cr.yaml
  kubectl wait kubevirts kubevirt -n kubevirt --for condition=available --timeout 10m
fi
# install CNI plugins on every cluster node (required by multus / bridge CNI)
readonly CNI_VERSION="v1.1.1"
readonly CNI_ARCHIVE="cni-plugins-linux-amd64-${CNI_VERSION}.tgz"
readonly CNI_URL="https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/${CNI_ARCHIVE}"
# download only when the archive is missing, but ALWAYS copy/extract onto the
# nodes — the original skipped node installation whenever the archive was
# cached locally, breaking re-runs against a freshly recreated cluster.
if [ ! -f "./${CNI_ARCHIVE}" ]; then
  curl -L "$CNI_URL" -o "./${CNI_ARCHIVE}"
fi
for node in $("$kind" get nodes --name "${CLUSTER_NAME}" | awk '{print $1}'); do
  ${CRI} cp "./${CNI_ARCHIVE}" "${node}:/"
  ${CRI} exec "$node" /bin/sh -c "tar -xf ${CNI_ARCHIVE} -C /opt/cni/bin"
done
# deploy multus (pinned kubevirtci manifest) to enable secondary networks
kubectl apply -f https://raw.githubusercontent.com/kubevirt/kubevirtci/2205030954-99bd4d1/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/multus.yaml
# print cluster state for the CI log
kubectl get nodes
kubectl get pods --all-namespaces
echo ""
# namespace the kiagnose framework job runs in
readonly NAMESPACE="kiagnose"
# cluster role binding granting the framework service-account admin rights
readonly ADMIN_CLUSTER_ROLE_BINDING="${NAMESPACE}-sa-admin"
# locally built images, loaded into the kind nodes further below
readonly FRAMEWORK_IMAGE="quay.io/kiagnose/kiagnose:devel"
readonly VMI_MANAGER_CLUSTER_ROLE_NAME="kubevirt-vmis-manager"
readonly VM_LATENCY_CHECKUP_IMAGE="quay.io/kiagnose/kubevirt-vm-latency:devel"
# Best-effort teardown of every cluster-scoped resource the script creates.
# Each delete ignores failures (`|| true`) so cleanup keeps going even when a
# resource was never created or is already gone; registered on EXIT below.
cleanup() {
  kubectl delete ns "${NAMESPACE}" || true
  kubectl delete clusterrolebinding "${ADMIN_CLUSTER_ROLE_BINDING}" || true
  kubectl delete ns checkup-workspace || true
  kubectl delete clusterrolebinding "${VMI_MANAGER_CLUSTER_ROLE_NAME}" || true
}
# make sure test resources are torn down on any exit path (success or failure)
trap "cleanup" EXIT
# build kiagnose framework binary and image
./automation/make.sh --build-core
./automation/make.sh --build-core-image
# build kubevirt-vm-latency checkup binary and image
pushd ./checkups/kubevirt-vm-latency
./automation/make.sh --build-checkup
./automation/make.sh --build-checkup-image
popd
# load the locally built images into the kind cluster nodes
$kind load docker-image "${FRAMEWORK_IMAGE}" --name "${CLUSTER_NAME}"
$kind load docker-image "${VM_LATENCY_CHECKUP_IMAGE}" --name "${CLUSTER_NAME}"
# create namespace for the checkup to run
kubectl create ns "${NAMESPACE}"
# grant the kiagnose service-account cluster-admin permissions
# NOTE(review): cluster-admin is very broad — acceptable on a throwaway
# dev/test cluster only
kubectl create clusterrolebinding "${ADMIN_CLUSTER_ROLE_BINDING}" \
--clusterrole=cluster-admin \
--serviceaccount="${NAMESPACE}:default"
# create cluster role required for the kubevirt-vm-latency checkup
kubevirt_vm_latency_cluster_roles="./checkups/kubevirt-vm-latency/manifests/clusterroles.yaml"
kubectl apply -f "$kubevirt_vm_latency_cluster_roles"

# create network-attachment-definition for the secondary (bridge) network
network_attachment_definition="./checkups/kubevirt-vm-latency/manifests/bridge-network-attachment-definition.yaml"
kubectl apply -f "$network_attachment_definition"

# extract checkup parameters from the manifests.
# NOTE: scraping YAML/JSON with grep lookbehinds is fragile — it works for
# these fixed manifests, but yq/jq would be safer if they ever change shape.
cluster_role_names=$(grep -Po "(?<=name: ).*" "$kubevirt_vm_latency_cluster_roles")
network_attachment_definition_namespace=$(grep -Po "(?<=namespace: ).*" "$network_attachment_definition")
network_attachment_definition_name=$(grep -Po "(?<=name: ).*" "$network_attachment_definition")

# create the bridge device on every cluster node; ignore per-command failures
# (e.g. bridge already exists from a previous run) instead of disabling -e
bridge_iface_name=$(grep -Po "(?<=\"name\":).*" "$network_attachment_definition" | sed 's/[\", ]//g')
for node in $("$kind" get nodes --name "${CLUSTER_NAME}" | awk '{print $1}'); do
  ${CRI} exec "$node" ip link add name "$bridge_iface_name" type bridge || true
  ${CRI} exec "$node" ip link set up dev "$bridge_iface_name" || true
done

# checkup run parameters
VM_LATENCY_CONFIGMAP="results"
CHEKUP_TIMEOUT="10m"   # sic: misspelled name kept — it is referenced below
MAX_DESIRED_LATENCY_MILLISECONDS="10"
SAMPLE_DURATION_SECONDS="5"
SOURCE_NODE="test-control-plane"
TARGET_NODE="test-control-plane"
# create the checkup ConfigMap consumed by the kiagnose framework.
# Indentation restored: the manifest must be valid YAML (nested mapping keys
# under metadata/data, literal block under spec.clusterRoles).
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ${VM_LATENCY_CONFIGMAP}
  namespace: ${NAMESPACE}
data:
  spec.image: ${VM_LATENCY_CHECKUP_IMAGE}
  spec.timeout: ${CHEKUP_TIMEOUT}
  spec.clusterRoles: |
    ${cluster_role_names}
  spec.param.network_attachment_definition_namespace: $network_attachment_definition_namespace
  spec.param.network_attachment_definition_name: $network_attachment_definition_name
  spec.param.max_desired_latency_milliseconds: "$MAX_DESIRED_LATENCY_MILLISECONDS"
  spec.param.sample_duration_seconds: "$SAMPLE_DURATION_SECONDS"
  spec.param.source_node: "$SOURCE_NODE"
  spec.param.target_node: "$TARGET_NODE"
EOF
# execute the checkup: a one-shot Job running the kiagnose framework image,
# pointed at the ConfigMap created above. Indentation restored — the manifest
# must be valid YAML.
JOB_NAME="kiagnose"
cat <<EOF | kubectl apply -f -
---
apiVersion: batch/v1
kind: Job
metadata:
  name: ${JOB_NAME}
  namespace: ${NAMESPACE}
spec:
  backoffLimit: 0
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: kiagnose
          image: ${FRAMEWORK_IMAGE}
          env:
            - name: CONFIGMAP_NAMESPACE
              value: ${NAMESPACE}
            - name: CONFIGMAP_NAME
              value: ${VM_LATENCY_CONFIGMAP}
EOF
# wait until the job's pod object exists (it may take a moment to be scheduled)
until [ -n "$(kubectl get pod -n "${NAMESPACE}" -l "job-name=${JOB_NAME}" --no-headers 2> /dev/null)" ]; do
  sleep 1
done

# wait for the pod to become ready, or to settle as not-ready (e.g. it already
# completed) — either outcome means logs are available
KIAGNOSE_POD_READY_TIMEOUT="3m"
pod=$(kubectl get pod -n "${NAMESPACE}" -l "job-name=${JOB_NAME}" --no-headers | awk '{print $1}')
kubectl wait pod "$pod" -n "${NAMESPACE}" --timeout "$KIAGNOSE_POD_READY_TIMEOUT" --for condition=ready=true || \
  kubectl wait pod "$pod" -n "${NAMESPACE}" --timeout "$KIAGNOSE_POD_READY_TIMEOUT" --for condition=ready=false

# stream the kiagnose logs until the job finishes
kubectl logs -n "${NAMESPACE}" "$pod" -f

# print the status.* result entries from the results ConfigMap.
# NOTE: splitting jsonpath output with sed is fragile; jq would be more robust
# if it can be assumed on the CI host.
kubectl get cm "$VM_LATENCY_CONFIGMAP" -n "${NAMESPACE}" -o jsonpath='{.data}' | \
  sed -e 's/,/\n/g' -e 's/}/}\n/g' | \
  grep status
echo ""
#sleep 300
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment