Skip to content

Instantly share code, notes, and snippets.

@flavio-fernandes
Last active February 23, 2024 20:22
Show Gist options
  • Save flavio-fernandes/1e21e857d83d4294f8678d5e97eb017d to your computer and use it in GitHub Desktop.
Steps for deploying ovn-kubernetes (ovnk8) using helm -- https://github.com/ovn-org/ovn-kubernetes/pull/4103
Prerequisites: install docker, kind, golang, and helm.
# clone this repo
git clone https://github.com/ovn-org/ovn-kubernetes.git && cd ovn-kubernetes
# Create a kind cluster config: 1 control-plane and 1 worker node, with the
# default CNI and kube-proxy disabled (ovn-kubernetes will provide both).
# The quoted delimiter ('EOT') makes the here-doc literal, so nothing in the
# YAML body can be accidentally expanded by the shell.
# NOTE(review): the networking sub-keys must be indented under 'networking:';
# at column 0 they would be parsed as top-level keys and ignored by kind.
cat <<'EOT' > kind.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
networking:
  disableDefaultCNI: true
  kubeProxyMode: none
EOT
# Ensure that the 'kind' docker network exists and does not have IPv6 enabled;
# if it does (or does not exist yet), recreate it as an IPv4-only network.
enabled_ipv6=$(docker network inspect kind -f '{{.EnableIPv6}}' 2>/dev/null)
[ "${enabled_ipv6}" = "false" ] || {
  docker network rm kind 2>/dev/null || :
  docker network create kind -o "com.docker.network.bridge.enable_ip_masquerade"="true" -o "com.docker.network.driver.mtu"="1500"
}
# Re-inspect after the potential recreate: the original reused the stale
# ${enabled_ipv6} value here, so it would always bail out even after the
# network had just been recreated correctly.
enabled_ipv6=$(docker network inspect kind -f '{{.EnableIPv6}}' 2>/dev/null)
# Diagnostics go to stderr ('>&2'); the original '2&>1' ran a command named
# '2' with output redirected to a file named '1'.
[ "${enabled_ipv6}" = "false" ] || { echo the kind network is not what we expected >&2; exit 1; }
# Aliases are only expanded in interactive shells by default; enable alias
# expansion so 'k' and 'kn' also work when these steps run as a script.
shopt -s expand_aliases
alias k=kubectl
alias kn="kubectl -n ovn-kubernetes"
export KUBECONFIG=~/admin.conf
# Start the k8s cluster. --retain keeps the node containers around after a
# failed creation, which helps debugging.
kind create cluster --config=kind.yaml --kubeconfig ~/admin.conf --retain
# Note: Nodes will not be ready, as there is no CNI configured yet.
# Example session shown for reference only -- commented out because the
# '$ ...' prompt line and the table output are not valid shell commands:
#   $ k get nodes -owide
#   NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
#   kind-control-plane NotReady control-plane 34s v1.24.0 172.21.0.2 <none> Ubuntu 21.10 6.5.12-100.fc37.x86_64 containerd://1.6.4
#   kind-worker NotReady <none> 9s v1.24.0 172.21.0.3 <none> Ubuntu 21.10 6.5.12-100.fc37.x86_64 containerd://1.6.4
# Build the local ovn-kube image and load it into the kind nodes.
# The command substitution is quoted so a repo path containing spaces
# does not break pushd (SC2046).
pushd "$(git rev-parse --show-toplevel)/dist/images" && \
  make fedora && \
  kind load docker-image ovn-kube-f:latest && \
  popd && \
  echo ok
# Derive the API server URL from the kube-apiserver pod's host IP.
# Assign before export so a kubectl failure is not masked by export's
# always-zero exit status (SC2155).
HOSTIP=$(kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].status.hostIP}')
export HOSTIP
export APISERVER="https://${HOSTIP}:6443"
# Install the ovn-kubernetes helm chart, pointing it at the locally built
# image (ovn-kube-f, loaded into the kind nodes above).
pushd "$(git rev-parse --show-toplevel)/helm/ovn-kubernetes" && \
  helm install ovn-kubernetes . -f values.yaml \
    --set k8sAPIServer="${APISERVER}" \
    --set global.image.repository=ovn-kube-f && \
  popd && \
  echo ok
# Watch the ovn-kubernetes pods come up (uses the 'kn' alias defined above).
kn get pod --watch
# to clean up
helm uninstall ovn-kubernetes
kind delete cluster --name kind
Prerequisites: install docker, kind, and helm (golang is not needed, since a prebuilt image is used).
# clone this repo
git clone https://github.com/ovn-org/ovn-kubernetes.git && cd ovn-kubernetes
# Create a kind cluster config: 1 control-plane and 1 worker node, with the
# default CNI and kube-proxy disabled (ovn-kubernetes will provide both).
# The quoted delimiter ('EOT') makes the here-doc literal, so nothing in the
# YAML body can be accidentally expanded by the shell.
# NOTE(review): the networking sub-keys must be indented under 'networking:';
# at column 0 they would be parsed as top-level keys and ignored by kind.
cat <<'EOT' > kind.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
networking:
  disableDefaultCNI: true
  kubeProxyMode: none
EOT
# Ensure that the 'kind' docker network exists and does not have IPv6 enabled;
# if it does (or does not exist yet), recreate it as an IPv4-only network.
enabled_ipv6=$(docker network inspect kind -f '{{.EnableIPv6}}' 2>/dev/null)
[ "${enabled_ipv6}" = "false" ] || {
  docker network rm kind 2>/dev/null || :
  docker network create kind -o "com.docker.network.bridge.enable_ip_masquerade"="true" -o "com.docker.network.driver.mtu"="1500"
}
# Re-inspect after the potential recreate: the original reused the stale
# ${enabled_ipv6} value here, so it would always bail out even after the
# network had just been recreated correctly.
enabled_ipv6=$(docker network inspect kind -f '{{.EnableIPv6}}' 2>/dev/null)
# Diagnostics go to stderr ('>&2'); the original '2&>1' ran a command named
# '2' with output redirected to a file named '1'.
[ "${enabled_ipv6}" = "false" ] || { echo the kind network is not what we expected >&2; exit 1; }
# Aliases are only expanded in interactive shells by default; enable alias
# expansion so 'k' and 'kn' also work when these steps run as a script.
shopt -s expand_aliases
alias k=kubectl
alias kn="kubectl -n ovn-kubernetes"
export KUBECONFIG=~/admin.conf
# Start the k8s cluster. --retain keeps the node containers around after a
# failed creation, which helps debugging.
kind create cluster --config=kind.yaml --kubeconfig ~/admin.conf --retain
# Note: Nodes will not be ready, as there is no CNI configured yet.
# Example session shown for reference only -- commented out because the
# '$ ...' prompt line and the table output are not valid shell commands:
#   $ k get nodes -owide
#   NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
#   kind-control-plane NotReady control-plane 34s v1.24.0 172.21.0.2 <none> Ubuntu 21.10 6.5.12-100.fc37.x86_64 containerd://1.6.4
#   kind-worker NotReady <none> 9s v1.24.0 172.21.0.3 <none> Ubuntu 21.10 6.5.12-100.fc37.x86_64 containerd://1.6.4
# Derive the API server URL from the kube-apiserver pod's host IP.
# Assign before export so a kubectl failure is not masked by export's
# always-zero exit status (SC2155).
HOSTIP=$(kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].status.hostIP}')
export HOSTIP
export APISERVER="https://${HOSTIP}:6443"
# Install the ovn-kubernetes helm chart, using the prebuilt image published
# on quay.io (no local build or 'kind load' step needed in this variant).
pushd "$(git rev-parse --show-toplevel)/helm/ovn-kubernetes" && \
  helm install ovn-kubernetes . -f values.yaml \
    --set k8sAPIServer="${APISERVER}" \
    --set global.image.repository=quay.io/ffernandes/ovn-kube-f && \
  popd && \
  echo ok
# Watch the ovn-kubernetes pods come up (uses the 'kn' alias defined above).
kn get pod --watch
# to clean up
helm uninstall ovn-kubernetes
kind delete cluster --name kind
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment