Skip to content

Instantly share code, notes, and snippets.

@berndonline
Last active November 22, 2025 16:07
Show Gist options
  • Select an option

  • Save berndonline/448fb2f3aade4b8f2df99bf8f0deb402 to your computer and use it in GitHub Desktop.

Select an option

Save berndonline/448fb2f3aade4b8f2df99bf8f0deb402 to your computer and use it in GitHub Desktop.
#!/usr/bin/env bash
#
# KIND cluster bootstrapper with add-ons
# --------------------------------------
# Reference gist: https://gist.github.com/berndonline/448fb2f3aade4b8f2df99bf8f0deb402
# Provisions a local Kubernetes-in-Docker (KIND) cluster and optionally
# installs common components: CNIs (Calico), ingress controllers (NGINX), and
# MetalLB.
#
# Flow summary
# 1) Parse flags/env and validate combinations
# 2) Ensure required CLIs (if sudo) and run preflight checks (docker/jq)
# 3) Render KIND config via jinja2 and create the cluster
# 4) Install requested add-ons and wait for readiness
#
# Requirements: Linux, Docker; optional apt for auto-install of j2cli.
# Absolute directory of this script with symlinks resolved; anchors all
# generated/temporary files (kind.yaml, jwks.json, ...).
DIR="$(dirname -- "$(readlink -f "${BASH_SOURCE}")")"
# Path for the default generated KIND config template
DEFAULT_KIND_CONFIG="${DIR}/kind.yaml.j2"
# Pinned minimum tool versions; the install_*_cli helpers upgrade any
# installed binary older than these.
KIND_VERSION=v0.30.0
KUBECTL_VERSION=v1.34.0
KUBECTX_VERSION=0.9.5
KUBENS_VERSION=0.9.5
HELM_VERSION=v3.18.6
KUSTOMIZE_VERSION=v5.7.1
# Calico release without leading 'v' (URL will prefix it)
CALICO_VERSION=3.30.3
# Envoy Gateway release tag uses the pattern 'vX.Y.Z'
ENVOY_GATEWAY_VERSION=v1.6.0
# MetalLB release without leading 'v' (URL will prefix it)
METALLB_VERSION=0.15.2
# OVN release to deploy (used for Helm chart version and images)
OVN_VERSION=v1.1.0
# MetalLB pool range (Docker network by default)
METALLB_ADDRESS_POOL=${METALLB_ADDRESS_POOL:-172.18.255.200-172.18.255.250}
# Optional API server JWKS URI for SA OIDC (unset to skip)
ACCOUNT_ISSUER_JWKS_URI=${ACCOUNT_ISSUER_JWKS_URI:-}
# Download a single binary into /usr/local/bin and mark it executable.
# Arguments: $1 - target binary name, $2 - download URL
# Requires sudo. curl -f makes HTTP errors (404, 5xx) return non-zero
# instead of silently saving an HTML error page as the "binary".
install(){
  sudo curl -fsSLo "/usr/local/bin/$1" "$2"
  sudo chmod +x "/usr/local/bin/$1"
}
# Download a tarball and extract one member into /usr/local/bin (requires sudo).
# Arguments: $1 - local archive name, $2 - download URL,
#            $3 - member path inside the archive,
#            $4 - optional extra tar flag (e.g. --strip-components=1)
install_tgz(){
  local archive=$1 url=$2 member=$3 extra=${4:-}
  local -a tar_args=()
  # Only pass the extra flag when supplied; quoting an empty "$4" would hand
  # tar a bogus empty argument.
  if [[ -n "${extra}" ]]; then
    tar_args+=("${extra}")
  fi
  # -f: fail on HTTP errors instead of saving the error page as the archive.
  curl -fsSLo "${archive}" "${url}"
  sudo tar xf "${archive}" "${tar_args[@]}" -C /usr/local/bin/ "${member}"
  # The archive is a plain file; rm -f suffices (was rm -rf).
  rm -f -- "${archive}"
}
# Abort early when runtime dependencies this script never installs itself
# are missing from PATH.
preflight_checks() {
  command -v docker >/dev/null 2>&1 || {
    echo "error: docker is required but not found in PATH"
    exit 1
  }
  command -v jq >/dev/null 2>&1 || {
    echo "error: jq is required but not found in PATH (used by kubectl version parsing)"
    exit 1
  }
}
# Ensure kind is installed and meets minimum version
# Installs via the pinned release URL on linux-gnu; on other OSes it only
# reports the missing binary (return 2) so the caller can decide.
install_kind_cli() {
if ! [ -x "$(command -v kind)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'kind not found, installing'
install "kind" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64"
else
echo "Missing required binary in path: kind"
return 2
fi
fi
local kind_installed_version installed_raw extracted
installed_raw="$(kind version -q 2>/dev/null | tr -d '\r')"
# normalize to v<semver>
extracted="$(echo "${installed_raw}" | grep -Eo 'v?[0-9]+(\.[0-9]+){1,}' | head -n1)"
if [[ -n "$extracted" && "$extracted" =~ ^v ]]; then kind_installed_version="$extracted"; else kind_installed_version="v$extracted"; fi
# sort -V picks the smaller of (desired, installed); if the desired version
# is NOT the smaller one, the installed binary is older -> upgrade it.
if [[ "${KIND_VERSION}" != $(printf "%s\n%s\n" "${KIND_VERSION}" "${kind_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected kind version: ${kind_installed_version}.
Requires ${KIND_VERSION} or greater.
Updating kind...
EOF
install "kind" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64"
fi
}
# Ensure kubectl is installed and meets minimum version
# Installs from dl.k8s.io on linux-gnu; otherwise reports missing (return 2).
install_kubectl_cli() {
if ! [ -x "$(command -v kubectl)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'kubectl not found, installing'
install "kubectl" "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
else
echo "Missing required binary in path: kubectl"
return 2
fi
fi
# Skip version compare if jq is unavailable at this point
# (preflight_checks will still hard-fail on jq later in the main flow).
if ! command -v jq >/dev/null 2>&1; then
return 0
fi
local kubectl_installed_version
kubectl_installed_version="$(kubectl version -ojson | jq -r '.clientVersion.gitVersion')"
# Upgrade when the installed client is older than the pinned version
# (sort -V picks the smaller of the two).
if [[ "${KUBECTL_VERSION}" != $(printf "%s\n%s\n" "${KUBECTL_VERSION}" "${kubectl_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected kubectl version: ${kubectl_installed_version}.
Requires ${KUBECTL_VERSION} or greater.
Updating kubectl...
EOF
install "kubectl" "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
fi
}
# Ensure kubectx is installed and meets minimum version
# KUBECTX_VERSION is unprefixed; the release tag/asset names carry the 'v'.
install_kubectx_cli() {
if ! [ -x "$(command -v kubectx)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'kubectx not found, installing'
install_tgz "kubectx_${KUBECTX_VERSION}_linux_x86_64.tar.gz" "https://github.com/ahmetb/kubectx/releases/download/v${KUBECTX_VERSION}/kubectx_v${KUBECTX_VERSION}_linux_x86_64.tar.gz" "kubectx"
else
echo "Missing required binary in path: kubectx"
return 2
fi
fi
local kubectx_installed_version desired installed_raw extracted
installed_raw="$(kubectx --version 2>/dev/null | head -n1 | tr -d '\r')"
# normalize to v<semver>
extracted="$(echo "${installed_raw}" | grep -Eo 'v?[0-9]+(\.[0-9]+){1,}' | head -n1)"
if [[ -n "$extracted" && "$extracted" =~ ^v ]]; then kubectx_installed_version="$extracted"; else kubectx_installed_version="v$extracted"; fi
desired="v${KUBECTX_VERSION}"
# Upgrade when installed < desired (sort -V picks the smaller version).
if [[ "${desired}" != $(printf "%s\n%s\n" "${desired}" "${kubectx_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected kubectx version: ${kubectx_installed_version}.
Requires ${KUBECTX_VERSION} or greater.
Updating kubectx...
EOF
install_tgz "kubectx_${KUBECTX_VERSION}_linux_x86_64.tar.gz" "https://github.com/ahmetb/kubectx/releases/download/v${KUBECTX_VERSION}/kubectx_v${KUBECTX_VERSION}_linux_x86_64.tar.gz" "kubectx"
fi
}
# Ensure kubens is installed and meets minimum version
# kubens ships from the same kubectx GitHub releases as kubectx.
install_kubens_cli() {
if ! [ -x "$(command -v kubens)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'kubens not found, installing'
install_tgz "kubens_${KUBENS_VERSION}_linux_x86_64.tar.gz" "https://github.com/ahmetb/kubectx/releases/download/v${KUBENS_VERSION}/kubens_v${KUBENS_VERSION}_linux_x86_64.tar.gz" "kubens"
else
echo "Missing required binary in path: kubens"
return 2
fi
fi
local kubens_installed_version desired installed_raw extracted
installed_raw="$(kubens --version 2>/dev/null | head -n1 | tr -d '\r')"
# normalize to v<semver>
extracted="$(echo "${installed_raw}" | grep -Eo 'v?[0-9]+(\.[0-9]+){1,}' | head -n1)"
if [[ -n "$extracted" && "$extracted" =~ ^v ]]; then kubens_installed_version="$extracted"; else kubens_installed_version="v$extracted"; fi
desired="v${KUBENS_VERSION}"
# Upgrade when installed < desired (sort -V picks the smaller version).
if [[ "${desired}" != $(printf "%s\n%s\n" "${desired}" "${kubens_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected kubens version: ${kubens_installed_version}.
Requires ${KUBENS_VERSION} or greater.
Updating kubens...
EOF
install_tgz "kubens_${KUBENS_VERSION}_linux_x86_64.tar.gz" "https://github.com/ahmetb/kubectx/releases/download/v${KUBENS_VERSION}/kubens_v${KUBENS_VERSION}_linux_x86_64.tar.gz" "kubens"
fi
}
# Ensure kustomize is installed and meets minimum version
# Release assets live under the URL-encoded tag 'kustomize%2F<version>'.
install_kustomize_cli() {
if ! [ -x "$(command -v kustomize)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'kustomize not found, installing'
install_tgz "kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" "kustomize"
else
echo "Missing required binary in path: kustomize"
return 2
fi
fi
local kustomize_installed_version
# Newer kustomize dropped 'version --short'; fall back to plain 'version'.
kustomize_installed_version="$(kustomize version --short 2>/dev/null | grep -Eo 'v[0-9]+(\.[0-9]+){1,}' | head -n1)"
if [[ -z "$kustomize_installed_version" ]]; then
kustomize_installed_version="$(kustomize version 2>/dev/null | grep -Eo 'v[0-9]+(\.[0-9]+){1,}' | head -n1)"
fi
# Upgrade when installed < desired (sort -V picks the smaller version).
if [[ "${KUSTOMIZE_VERSION}" != $(printf "%s\n%s\n" "${KUSTOMIZE_VERSION}" "${kustomize_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected kustomize version: ${kustomize_installed_version}.
Requires ${KUSTOMIZE_VERSION} or greater.
Updating kustomize...
EOF
install_tgz "kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" "kustomize"
fi
}
# Ensure helm is installed and meets minimum version
# The tarball nests the binary under linux-amd64/, hence --strip-components=1.
install_helm_cli() {
if ! [ -x "$(command -v helm)" ]; then
if [[ "${OSTYPE}" == "linux-gnu" ]]; then
echo 'helm not found, installing'
install_tgz "helm-${HELM_VERSION}-linux-amd64.tar.gz" "https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz" "linux-amd64/helm" "--strip-components=1"
else
echo "Missing required binary in path: helm"
return 2
fi
fi
local helm_installed_version
# Template output gives the bare version; '--short' is the fallback format.
helm_installed_version="$(helm version --template '{{.Version}}' 2>/dev/null || helm version --short | awk '{print $1}')"
# Upgrade when installed < desired (sort -V picks the smaller version).
if [[ "${HELM_VERSION}" != $(printf "%s\n%s\n" "${HELM_VERSION}" "${helm_installed_version}" | sort -V | head -n1) ]]; then
cat <<EOF
Detected helm version: ${helm_installed_version}.
Requires ${HELM_VERSION} or greater.
Updating helm...
EOF
install_tgz "helm-${HELM_VERSION}-linux-amd64.tar.gz" "https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz" "linux-amd64/helm" "--strip-components=1"
fi
}
# Guarantee the 'j2' jinja2 CLI renderer is available. Auto-installs the
# j2cli package via apt on Linux; on any other platform there is no install
# path, so the script aborts.
install_j2_renderer() {
  command -v j2 >/dev/null 2>&1 && return 0
  if [[ "${OSTYPE}" == linux* ]] && command -v apt-get >/dev/null 2>&1; then
    sudo apt-get update -yq
    sudo apt-get install -yq j2cli
  else
    echo "error: j2 renderer (j2cli) not found and cannot be auto-installed on this OS"
    exit 1
  fi
}
# Retry wrapper for kubectl: re-run the same command once per second until it
# succeeds, tolerating the window where the API server is still coming up.
# Gives up (and exits the script) after 61 failed attempts.
run_kubectl() {
  local attempt
  local -r max_attempts=60
  for (( attempt = 0; ; attempt++ )); do
    kubectl "$@" && return 0
    if (( attempt >= max_attempts )); then
      echo "error: 'kubectl $*' did not succeed, failing"
      exit 1
    fi
    echo "info: waiting for 'kubectl $*' to succeed..."
    sleep 1
  done
}
# Optional helper to configure and validate AWS STS federation on the cluster
# Publishes the cluster's OIDC discovery document and JWKS to an S3 bucket so
# AWS STS can verify projected ServiceAccount tokens, then runs a validation
# pod. Requires the aws CLI and KIND_AWSSTS_S3_BUCKET.
enable_awssts() {
if ! command -v aws >/dev/null 2>&1; then
echo "error: aws CLI not found; cannot publish JWKS for STS"
exit 1
fi
# Defensive re-check; validate_flags normally enforces this before we run.
if [[ -z "${KIND_AWSSTS_S3_BUCKET}" ]]; then
echo "error: --awssts-s3-bucket must be set when --enable-awssts is specified"
exit 1
fi
local jwks_file="${DIR}/jwks.json"
local openid_config_file="${DIR}/openid-configuration.json"
local s3_base="s3://${KIND_AWSSTS_S3_BUCKET}"
# tee both stores and echoes the discovery doc for operator inspection.
kubectl get --raw="/.well-known/openid-configuration" | jq '.' | tee "${openid_config_file}"
kubectl get --raw="/openid/v1/jwks" | jq '.' > "${jwks_file}"
aws s3 cp "${openid_config_file}" "${s3_base}/.well-known/openid-configuration"
# NOTE(review): this lands the key set at the bucket root as 'jwks.json';
# confirm that matches the jwks_uri advertised in the discovery document.
aws s3 cp "${jwks_file}" "${s3_base}"
rm -f "${openid_config_file}" "${jwks_file}"
run_awssts_validation
}
# Launch a short-lived aws-cli pod that mounts a projected ServiceAccount
# token (audience sts.amazonaws.com) and prove the web-identity role can be
# assumed (sts get-caller-identity) and used (s3 ls). Cleans the pod up on
# both success and failure paths. Exits the script if the pod never readies.
run_awssts_validation() {
  local pod_name="awscli-sts-check"
  kubectl delete pod -n default "${pod_name}" --ignore-not-found=true >/dev/null 2>&1 || true
  # Manifest indentation matters: the projected token volume must sit under
  # spec.volumes, the env/mount under the awscli container.
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ${pod_name}
  namespace: default
spec:
  serviceAccountName: default
  restartPolicy: Never
  containers:
  - name: awscli
    image: amazon/aws-cli
    command: ["sleep","3000"]
    env:
    - name: AWS_WEB_IDENTITY_TOKEN_FILE
      value: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
    - name: AWS_ROLE_ARN
      value: ${KIND_AWSSTS_ROLE_ARN}
    volumeMounts:
    - name: aws-token
      mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
      readOnly: true
  volumes:
  - name: aws-token
    projected:
      sources:
      - serviceAccountToken:
          path: token
          audience: sts.amazonaws.com
          expirationSeconds: 3600
EOF
  if ! run_kubectl -n default wait --for=condition=Ready pod/${pod_name} --timeout=180s; then
    echo "error: ${pod_name} pod did not become ready for AWS STS validation"
    kubectl logs -n default "${pod_name}" || true
    kubectl delete pod -n default "${pod_name}" --ignore-not-found=true >/dev/null 2>&1 || true
    exit 1
  fi
  kubectl exec -n default "${pod_name}" -- aws sts get-caller-identity
  kubectl exec -n default "${pod_name}" -- aws s3 ls
  kubectl delete pod -n default "${pod_name}" --ignore-not-found=true >/dev/null 2>&1 || true
}
# CLI usage and available flags.
# Version/default variables (KUBECTL_VERSION, METALLB_ADDRESS_POOL) expand at
# call time. Fix: the flag summary now lists --enable-awssts,
# --awssts-s3-bucket, --awssts-role-arn and --delete, which the description
# section already documented but the summary omitted.
usage() {
  cat <<EOF
usage: kind.sh [--name <cluster-name>]
               [--num-workers <num>]
               [--config-file <file>]
               [--kubernetes-version <num>]
               [--cluster-apiaddress <num>]
               [--cluster-apiport <num>]
               [--cluster-loglevel <num>]
               [--cluster-podsubnet <num>]
               [--cluster-svcsubnet <num>]
               [--cluster-account-issuer <url>]
               [--cluster-account-jwks-uri <url>]
               [--enable-awssts]
               [--awssts-s3-bucket <bucket[/prefix]>]
               [--awssts-role-arn <arn>]
               [--disable-default-cni]
               [--add-corporate-ca]
               [--install-calico]
               [--install-ovn]
               [--install-metallb]
               [--metallb-address-pool <start-end>]
               [--install-envoy-gateway]
               [--print-versions]
               [--delete]
               [--help]

--name                       Name of the KIND cluster
                             DEFAULT: kind
--num-workers                Number of worker nodes.
                             DEFAULT: 0 worker nodes.
--config-file                Name of the KIND J2 configuration file.
                             DEFAULT: ./kind.yaml.j2
--kubernetes-version         Flag to specify the Kubernetes version.
                             DEFAULT: Kubernetes ${KUBECTL_VERSION} (matches kubectl)
--cluster-apiaddress         Kubernetes API IP address for kind (master).
                             DEFAULT: 0.0.0.0.
--cluster-apiport            Kubernetes API port for kind (master).
                             DEFAULT: 6443.
--cluster-loglevel           Log level for kind (master).
                             DEFAULT: 4.
--cluster-podsubnet          Pod subnet IP address range.
                             DEFAULT: 10.128.0.0/14.
--cluster-svcsubnet          Service subnet IP address range.
                             DEFAULT: 172.30.0.0/16.
--cluster-account-issuer     Change account issuer endpoint
                             DEFAULT: https://kubernetes.default.svc
--cluster-account-jwks-uri   JWKS URI for API server ServiceAccount OIDC
                             DEFAULT: not set
--enable-awssts              Setup STS issuer data and validate projected credentials
                             DEFAULT: Do not configure STS.
--awssts-s3-bucket           S3 bucket (optionally with prefix) to host the JWKS
                             DEFAULT: must be provided with --enable-awssts.
--awssts-role-arn            IAM role ARN that the validation pod should assume
                             DEFAULT: must be provided with --enable-awssts.
--disable-default-cni        Flag to disable Kind default CNI - required to install custom cni plugin.
                             DEFAULT: Default CNI used.
--add-corporate-ca           Add CA ./corporate.crt to use behind corporate proxy.
                             DEFAULT: Don't add corporate CA file.
--install-calico             Flag to install Calico CNI components.
                             DEFAULT: Don't install calico components.
--install-ovn                Flag to install ovn-kubernetes manifests.
                             DEFAULT: Don't install ovn-kubernetes.
--install-metallb            Flag to install Metal LB Components.
                             DEFAULT: Don't install loadbalancer components.
--metallb-address-pool       MetalLB address pool range (start-end).
                             DEFAULT: ${METALLB_ADDRESS_POOL}
--install-envoy-gateway      Flag to install Envoy Gateway components.
                             DEFAULT: Don't install Envoy Gateway components.
--print-versions             Print detected tool versions and exit.
--delete                     Delete Kind cluster.

EOF
}
# Parse arguments and map them to variables used by the script
# Value-taking flags consume the following argument via 'shift'; a trailing
# value flag with nothing after it silently yields an empty value.
# The '[ "$1" != "" ]' loop condition relies on $1 expanding empty once the
# arguments are exhausted (strict mode is not enabled yet at parse time).
parse_args() {
while [ "$1" != "" ]; do
case $1 in
--name ) shift
KIND_CLUSTER_NAME=$1
;;
--num-workers ) shift
# Must be a non-negative integer.
if ! [[ "$1" =~ ^[0-9]+$ ]]; then
echo "Invalid num-workers: $1"
usage
exit 1
fi
KIND_NUM_WORKER=$1
;;
--config-file ) shift
if test ! -f "$1"; then
echo "$1 does not exist"
usage
exit 1
fi
KIND_CONFIG=$1
;;
--kubernetes-version ) shift
KIND_K8S_VERSION=$1
;;
--cluster-apiaddress ) shift
KIND_CLUSTER_APIADDRESS=$1
;;
--cluster-apiport ) shift
KIND_CLUSTER_APIPORT=$1
;;
--cluster-loglevel ) shift
# Single digit 0-9 only.
if ! [[ "$1" =~ ^[0-9]$ ]]; then
echo "Invalid cluster-loglevel: $1"
usage
exit 1
fi
KIND_CLUSTER_LOGLEVEL=$1
;;
--cluster-podsubnet ) shift
NET_CIDR_IPV4=$1
;;
--cluster-svcsubnet ) shift
SVC_CIDR_IPV4=$1
;;
--cluster-account-issuer ) shift
ACCOUNT_ISSUER_URL=$1
;;
--cluster-account-jwks-uri ) shift
ACCOUNT_ISSUER_JWKS_URI=$1
;;
--enable-awssts ) KIND_ENABLE_AWSSTS=true
;;
--awssts-s3-bucket ) shift
KIND_AWSSTS_S3_BUCKET=$1
;;
--awssts-role-arn ) shift
KIND_AWSSTS_ROLE_ARN=$1
;;
--disable-default-cni ) KIND_DISABLE_DEFAULT_CNI=true
;;
--add-corporate-ca ) KIND_ADD_CORPORATE_CA=true
;;
--install-calico ) KIND_INSTALL_CALICO=true
;;
--install-ovn ) KIND_INSTALL_OVN=true
;;
--install-metallb ) KIND_INSTALL_METALLB=true
;;
--metallb-address-pool ) shift
METALLB_ADDRESS_POOL=$1
;;
--install-envoy-gateway ) KIND_INSTALL_ENVOY_GATEWAY=true
;;
--print-versions ) PRINT_VERSIONS=true
;;
# --delete acts immediately with whatever --name was parsed so far
# (delete() falls back to the "kind" default when unset).
--delete ) delete
exit
;;
--help ) usage
exit
;;
* ) usage
exit 1
esac
shift
done
}
# Fill in every tunable with its default unless a flag or environment
# variable already set it. Idempotent; safe to call after parse_args.
set_default_params() {
  : "${KIND_CLUSTER_NAME:=kind}"
  : "${KIND_CONFIG:=${DEFAULT_KIND_CONFIG}}"
  # Align the cluster's Kubernetes version with the pinned kubectl client.
  : "${KIND_K8S_VERSION:=${KUBECTL_VERSION}}"
  : "${KIND_NUM_WORKER:=0}"
  : "${KIND_CLUSTER_APIADDRESS:=0.0.0.0}"
  : "${KIND_CLUSTER_APIPORT:=6443}"
  : "${KIND_CLUSTER_LOGLEVEL:=4}"
  : "${KIND_DISABLE_DEFAULT_CNI:=false}"
  : "${KIND_ENABLE_AWSSTS:=false}"
  # These two default to empty; validate_flags enforces them when STS is on.
  : "${KIND_AWSSTS_S3_BUCKET:=}"
  : "${KIND_AWSSTS_ROLE_ARN:=}"
  : "${KIND_ADD_CORPORATE_CA:=false}"
  : "${KIND_INSTALL_CALICO:=false}"
  : "${KIND_INSTALL_OVN:=false}"
  : "${KIND_INSTALL_METALLB:=false}"
  : "${KIND_INSTALL_ENVOY_GATEWAY:=false}"
  : "${NET_CIDR_IPV4:=10.128.0.0/14}"
  : "${SVC_CIDR_IPV4:=172.30.0.0/16}"
  : "${ACCOUNT_ISSUER_URL:=https://kubernetes.default.svc}"
  : "${ACCOUNT_ISSUER_JWKS_URI:=}"
}
# Print the fully-resolved configuration so the user can audit the values
# that provisioning is about to use.
print_params() {
  cat <<EOF
Using these parameters to install KIND

KIND_CLUSTER_NAME = $KIND_CLUSTER_NAME
KIND_NUM_WORKER = $KIND_NUM_WORKER
KIND_CONFIG_FILE = $KIND_CONFIG
KIND_KUBERNETES_VERSION = $KIND_K8S_VERSION
KIND_CLUSTER_APIADDRESS = $KIND_CLUSTER_APIADDRESS
KIND_CLUSTER_APIPORT = $KIND_CLUSTER_APIPORT
KIND_CLUSTER_LOGLEVEL = $KIND_CLUSTER_LOGLEVEL
KIND_CLUSTER_PODSUBNET = $NET_CIDR_IPV4
KIND_CLUSTER_SVCSUBNET = $SVC_CIDR_IPV4
KIND_CLUSTER_ACCOUNT_ISSUER = $ACCOUNT_ISSUER_URL
KIND_CLUSTER_ACCOUNT_JWKS_URI = ${ACCOUNT_ISSUER_JWKS_URI:-}
KIND_DISABLE_DEFAULT_CNI = $KIND_DISABLE_DEFAULT_CNI
KIND_ENABLE_AWSSTS = $KIND_ENABLE_AWSSTS
KIND_AWSSTS_S3_BUCKET = ${KIND_AWSSTS_S3_BUCKET:-}
KIND_AWSSTS_ROLE_ARN = ${KIND_AWSSTS_ROLE_ARN:-}
KIND_ADD_CORPORATE_CA = $KIND_ADD_CORPORATE_CA
KIND_INSTALL_CALICO = $KIND_INSTALL_CALICO
KIND_INSTALL_OVN = $KIND_INSTALL_OVN
KIND_INSTALL_METALLB = $KIND_INSTALL_METALLB
METALLB_ADDRESS_POOL = $METALLB_ADDRESS_POOL
KIND_INSTALL_ENVOY_GATEWAY = $KIND_INSTALL_ENVOY_GATEWAY

EOF
}
# Helpers to surface installed tool versions in a consistent way
# Report the installed kind release normalized to a leading-'v' semver.
get_kind_version() {
  local reported semver
  reported="$(kind version -q 2>/dev/null | tr -d '\r')"
  # Keep the numeric core, dropping any existing 'v' prefix and suffix text.
  semver="$(sed -E 's/^v?([0-9]+(\.[0-9]+){1,}).*$/\1/' <<<"${reported}")"
  printf 'v%s' "${semver}"
}
# Report the kubectl client version. Prefers structured JSON output (needs
# jq); falls back to the legacy '--short' text format, taking the last field.
get_kubectl_version() {
  if command -v jq >/dev/null 2>&1; then
    kubectl version -ojson 2>/dev/null | jq -r '.clientVersion.gitVersion'
    return
  fi
  kubectl version --client --short 2>/dev/null | awk '{print $NF}'
}
# Normalize `kubectx --version` output to a 'v'-prefixed semver.
# Prints nothing when no version-looking token is found.
get_kubectx_version() {
  local line ver
  line="$(kubectx --version 2>/dev/null | head -n1 | tr -d '\r')"
  ver="$(grep -Eo 'v?[0-9]+(\.[0-9]+){1,}' <<<"${line}" | head -n1)"
  case "${ver}" in
    "") ;;                        # nothing detected: stay silent
    v*) printf '%s' "${ver}" ;;   # already carries the prefix
    *)  printf 'v%s' "${ver}" ;;  # add the prefix
  esac
}
# Normalize `kubens --version` output to a 'v'-prefixed semver.
# Prints nothing when no version-looking token is found.
get_kubens_version() {
  local line ver
  line="$(kubens --version 2>/dev/null | head -n1 | tr -d '\r')"
  ver="$(grep -Eo 'v?[0-9]+(\.[0-9]+){1,}' <<<"${line}" | head -n1)"
  case "${ver}" in
    "") ;;                        # nothing detected: stay silent
    v*) printf '%s' "${ver}" ;;   # already carries the prefix
    *)  printf 'v%s' "${ver}" ;;  # add the prefix
  esac
}
# Report the installed kustomize version. Tries the legacy 'version --short'
# form first (dropped in newer releases), then plain 'version'; either way
# extracts the first vX.Y[.Z...] token.
get_kustomize_version() {
  local detected
  detected="$(kustomize version --short 2>/dev/null | grep -Eo 'v[0-9]+(\.[0-9]+){1,}' | head -n1)"
  [[ -n "${detected}" ]] || detected="$(kustomize version 2>/dev/null | grep -Eo 'v[0-9]+(\.[0-9]+){1,}' | head -n1)"
  printf '%s' "${detected}"
}
# Report the installed helm version: ask for the bare version via template,
# fall back to '--short' output's first field, else print 'unknown'.
get_helm_version() {
  local detected
  detected="$(helm version --template '{{.Version}}' 2>/dev/null)"
  [[ -n "${detected}" ]] || detected="$(helm version --short 2>/dev/null | awk '{print $1}')"
  printf '%s' "${detected:-unknown}"
}
# Render one "- <label>: ..." status line comparing installed vs desired.
# Arguments: $1 tool binary to look up, $2 desired version string,
#            $3 detector function name, $4 optional display label (default $1)
print_tool_version() {
  local binary=$1 wanted=$2 probe=$3
  local display=${4:-$1}
  if ! command -v "${binary}" >/dev/null 2>&1; then
    echo "- ${display}: not found (desired=${wanted})"
    return
  fi
  local found
  # $probe is intentionally unquoted: it is a function name to invoke.
  found="$(${probe} 2>/dev/null)"
  echo "- ${display}: installed=${found:-unknown} desired=${wanted}"
}
# Print detected versions for key tools
# One line per tool via print_tool_version; kubectx/kubens pins are stored
# without the 'v' prefix, so it is added here for display consistency.
print_detected_versions() {
echo "Detected tool versions:"
print_tool_version kind "${KIND_VERSION}" get_kind_version
print_tool_version kubectl "${KUBECTL_VERSION}" get_kubectl_version
print_tool_version kubectx "v${KUBECTX_VERSION}" get_kubectx_version
print_tool_version kubens "v${KUBENS_VERSION}" get_kubens_version
print_tool_version kustomize "${KUSTOMIZE_VERSION}" get_kustomize_version
print_tool_version helm "${HELM_VERSION}" get_helm_version
}
# Reconcile conflicting and dependent flags before provisioning:
# - at most one primary CNI (Calico or ovn-kubernetes)
# - a primary CNI forces kind's default CNI off
# - AWS STS needs both the S3 bucket and the role ARN
# - Envoy Gateway implies MetalLB (LoadBalancer IPs in KIND)
# May mutate KIND_DISABLE_DEFAULT_CNI and KIND_INSTALL_METALLB.
validate_flags() {
  local primary_cni_requested=
  [[ "$KIND_INSTALL_CALICO" == true ]] && primary_cni_requested="calico"
  if [[ "$KIND_INSTALL_OVN" == true ]]; then
    if [[ -n "${primary_cni_requested}" ]]; then
      echo "error: multiple primary CNIs requested (${primary_cni_requested} and ovn-kubernetes)"
      exit 1
    fi
    primary_cni_requested="ovn-kubernetes"
  fi
  if [[ -n "${primary_cni_requested}" && "$KIND_DISABLE_DEFAULT_CNI" != true ]]; then
    echo "info: disabling default CNI because a primary CNI install was requested"
    KIND_DISABLE_DEFAULT_CNI=true
  fi
  if [[ "${KIND_ENABLE_AWSSTS}" == true ]]; then
    if [[ -z "${KIND_AWSSTS_S3_BUCKET}" ]]; then
      echo "error: --awssts-s3-bucket must be set when --enable-awssts is specified"
      exit 1
    fi
    if [[ -z "${KIND_AWSSTS_ROLE_ARN}" ]]; then
      echo "error: --awssts-role-arn must be set when --enable-awssts is specified"
      exit 1
    fi
  fi
  if [[ "${KIND_INSTALL_ENVOY_GATEWAY}" == true && "${KIND_INSTALL_METALLB}" != true ]]; then
    echo "info: enabling MetalLB because Envoy Gateway LoadBalancer services require it in KIND"
    KIND_INSTALL_METALLB=true
  fi
}
# Write the default jinja2 template that create_kind_cluster renders with j2.
# Template variables (num_worker, cluster_loglevel, ...) arrive as environment
# values at render time. '{%- ... %}' trims the preceding newline so optional
# sections collapse cleanly when disabled. The '---' inside the literal block
# separates the ClusterConfiguration and InitConfiguration kubeadm documents
# within a single kubeadmConfigPatches entry.
generate_kind_config() {
  cat <<EOF >"${DEFAULT_KIND_CONFIG}"
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
{%- if disable_default_cni is equalto "true" %}
  disableDefaultCNI: true
{%- endif %}
{%- if disable_kubeproxy is equalto "true" %}
  kubeProxyMode: "none"
{%- endif %}
  apiServerAddress: {{ cluster_apiaddress }}
  apiServerPort: {{ cluster_apiport }}
{%- if net_cidr %}
  podSubnet: "{{ net_cidr }}"
{%- endif %}
{%- if svc_cidr %}
  serviceSubnet: "{{ svc_cidr }}"
{%- endif %}
kubeadmConfigPatches:
- |
  kind: ClusterConfiguration
  metadata:
    name: config
  apiServer:
    extraArgs:
      "v": "{{ cluster_loglevel }}"
{%- if account_issuer_url %}
      service-account-issuer: {{ account_issuer_url }}
{%- endif %}
{%- if account_issuer_jwks_uri %}
      service-account-jwks-uri: {{ account_issuer_jwks_uri }}
      service-account-signing-key-file: /etc/kubernetes/pki/sa.key
      service-account-key-file: /etc/kubernetes/pki/sa.pub
      api-audiences: "https://kubernetes.default.svc"
{%- endif %}
  controllerManager:
    extraArgs:
      "v": "{{ cluster_loglevel }}"
  scheduler:
    extraArgs:
      "v": "{{ cluster_loglevel }}"
  ---
  kind: InitConfiguration
  nodeRegistration:
    kubeletExtraArgs:
      "v": "{{ cluster_loglevel }}"
nodes:
- role: control-plane
{%- if add_corporate_ca is equalto "true" %}
  extraMounts:
  - containerPath: /usr/local/share/ca-certificates/corporate.crt
    hostPath: ${DIR}/corporate.crt
{%- endif %}
{%- for _ in range(num_worker | int) %}
- role: worker
{%- if add_corporate_ca is equalto "true" %}
  extraMounts:
  - containerPath: /usr/local/share/ca-certificates/corporate.crt
    hostPath: ${DIR}/corporate.crt
{%- endif %}
{%- endfor %}
EOF
}
# Render the j2 template (variables passed as per-command environment values,
# which j2cli reads as its context), delete any same-named cluster, create
# the new cluster, export kubeconfig, and clean up generated files.
create_kind_cluster() {
  KIND_CONFIG_LCL=${DIR}/kind.yaml
  num_worker=$KIND_NUM_WORKER \
  cluster_loglevel=$KIND_CLUSTER_LOGLEVEL \
  cluster_apiaddress=$KIND_CLUSTER_APIADDRESS \
  cluster_apiport=$KIND_CLUSTER_APIPORT \
  disable_default_cni=$KIND_DISABLE_DEFAULT_CNI \
  disable_kubeproxy=$KIND_INSTALL_OVN \
  add_corporate_ca=$KIND_ADD_CORPORATE_CA \
  net_cidr=$NET_CIDR_IPV4 \
  svc_cidr=$SVC_CIDR_IPV4 \
  account_issuer_jwks_uri=$ACCOUNT_ISSUER_JWKS_URI \
  account_issuer_url=$ACCOUNT_ISSUER_URL \
  j2 "${KIND_CONFIG}" -o "${KIND_CONFIG_LCL}"
  # Exact whole-line match: a bare grep would also match e.g. "kind-2" when
  # the requested name is "kind" and delete the wrong cluster.
  if kind get clusters | grep -qx "${KIND_CLUSTER_NAME}"; then
    delete
  fi
  kind create cluster --name "${KIND_CLUSTER_NAME}" --image "kindest/node:${KIND_K8S_VERSION}" --config="${KIND_CONFIG_LCL}"
  kind export kubeconfig --name "${KIND_CLUSTER_NAME}"
  rm -f "${DIR}/kind.yaml"
  # Only remove the template if we generated it ourselves.
  if [[ "${KIND_CONFIG}" == "${DEFAULT_KIND_CONFIG}" ]]; then
    rm -f "${DEFAULT_KIND_CONFIG}"
  fi
}
# Remove the KIND cluster; safe to call repeatedly or when it never existed.
# Falls back to the "kind" default name when --name was never parsed.
delete() {
  local cluster="${KIND_CLUSTER_NAME:-kind}"
  kind delete cluster --name "${cluster}"
}
# Scale CoreDNS down to a single replica to save resources on local hosts.
# Uses run_kubectl so the call survives the API server's warm-up window.
kubectl_scaledown_coredns() {
# Reduce CoreDNS to 1 replica to save resources on local hosts
run_kubectl scale deployment --replicas 1 coredns --namespace kube-system
}
# Rewrite the CoreDNS Corefile so upstream queries go to public resolvers
# instead of the node's /etc/resolv.conf, then restart the deployment.
ensure_coredns_public_forward() {
# Force CoreDNS to forward to public resolvers (avoids host DNS quirks in KIND/OVN)
local tmp_corefile
tmp_corefile="$(mktemp)"
# The '.' in '/etc.resolv.conf' is a regex wildcard, so it matches the
# literal path '/etc/resolv.conf' in the existing Corefile.
kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}' \
| sed 's@forward . /etc.resolv.conf@forward . 1.1.1.1 8.8.8.8@g' > "${tmp_corefile}"
# Render with --dry-run and pipe into apply so reruns update rather than fail.
kubectl -n kube-system create configmap coredns --from-file=Corefile="${tmp_corefile}" --dry-run=client -o yaml \
| kubectl apply -f -
rm -f "${tmp_corefile}"
kubectl -n kube-system rollout restart deployment/coredns
kubectl -n kube-system rollout status deployment/coredns --timeout=120s
}
# Rebuild the CA trust store inside every node container of this cluster
# (control-plane and workers). The corporate cert must already be
# bind-mounted via extraMounts in the generated kind config.
# See https://github.com/kubernetes-sigs/kind/issues/1010
add_corporate_ca() {
  local node_list
  node_list=$(docker ps --format '{{.Names}}' | grep -E "^${KIND_CLUSTER_NAME}-(control-plane[0-9]*|worker[0-9]*)$" || true)
  if [[ -z "${node_list}" ]]; then
    echo "warn: no KIND nodes found for cluster '${KIND_CLUSTER_NAME}'"
    return 0
  fi
  local node
  for node in ${node_list}; do
    echo "info: updating CA certificates in ${node}"
    # No TTY allocation on purpose; must work in non-interactive contexts.
    docker exec "${node}" update-ca-certificates \
      || echo "warn: update-ca-certificates failed in ${node}"
  done
}
# Install the Calico CNI from its pinned release manifest and wait for the
# calico-node pods to become Ready; exits the script on timeout.
install_calico() {
# Install Calico CNI pinned to a release (https://www.projectcalico.org)
run_kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml
# Grace period so the pods exist before 'kubectl wait' selects them
# (wait errors out when the selector matches no resources).
sleep 15
if ! kubectl wait -n kube-system --for=condition=ready pods -l k8s-app=calico-node --timeout=300s ; then
echo "some pods in the system are not running"
# NOTE(review): -A and -n are both passed here; confirm kubectl resolves
# that the intended way (listing all namespaces for diagnostics).
run_kubectl get pods -A -o wide -n kube-system || true
exit 1
fi
}
# Deploy ovn-kubernetes as the primary CNI from its upstream Helm chart:
# prep the nodes (IPv6/forwarding sysctls, per-node zone labels), derive a
# pod-reachable API endpoint, download the chart sources for OVN_VERSION,
# reinstall into freshly recreated namespaces, and wait for the node
# daemonset to roll out. Exits the script if the daemonset never readies.
install_ovn() {
# Deploy ovn-kubernetes via the published Helm chart repository
local nodes
nodes="$(kind get nodes --name "${KIND_CLUSTER_NAME}" 2>/dev/null || true)"
if [[ -n "${nodes}" ]]; then
echo "info: enabling IPv6 and forwarding on KIND nodes for ovn-kubernetes"
for node in ${nodes}; do
# --ignore: don't fail on kernels lacking the sysctl key
docker exec "${node}" sysctl --ignore net.ipv6.conf.all.disable_ipv6=0 || true
docker exec "${node}" sysctl --ignore net.ipv6.conf.all.forwarding=1 || true
# single-node-zone layout: each node forms its own OVN zone
run_kubectl label node "${node}" k8s.ovn.org/zone-name=${node} --overwrite || true
done
fi
local api_host api_endpoint
# Prefer the apiserver pod's host IP (reachable from inside pods);
# fall back to the server URL from the local kubeconfig.
api_host="$(run_kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].status.hostIP}' 2>/dev/null | tr -d '\r')"
if [[ -n "${api_host}" ]]; then
api_endpoint="https://${api_host}:6443"
else
api_endpoint="$(run_kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')"
fi
local temp_dir
temp_dir="$(mktemp -d)"
echo "info: downloading ovn-kubernetes sources for ${OVN_VERSION}"
local archive_path="${temp_dir}/ovn-kubernetes.tar.gz"
curl -fsSL "https://github.com/ovn-kubernetes/ovn-kubernetes/archive/refs/tags/${OVN_VERSION}.tar.gz" -o "${archive_path}"
tar -xzf "${archive_path}" -C "${temp_dir}"
local chart_root="${temp_dir}/ovn-kubernetes-${OVN_VERSION#v}/helm/ovn-kubernetes"
local values_file="${chart_root}/values-single-node-zone.yaml"
echo "info: ensuring clean ovn namespaces before install"
# Delete-then-create guarantees no stale resources survive a rerun.
kubectl delete namespace ovn-kubernetes --wait=true --ignore-not-found >/dev/null 2>&1 || true
kubectl delete namespace ovn-host-network --wait=true --ignore-not-found >/dev/null 2>&1 || true
kubectl get namespace ovn-kubernetes >/dev/null 2>&1 || kubectl create namespace ovn-kubernetes >/dev/null
kubectl get namespace ovn-host-network >/dev/null 2>&1 || kubectl create namespace ovn-host-network >/dev/null
# NOTE(review): chart sources are pinned to OVN_VERSION but the image tag
# is 'master' — confirm that mismatch is intended.
helm upgrade --install ovn-kubernetes "${chart_root}" \
--namespace ovn-kubernetes \
-f "${values_file}" \
--set-string "k8sAPIServer=${api_endpoint}" \
--set-string "podNetwork=${NET_CIDR_IPV4}" \
--set-string "serviceNetwork=${SVC_CIDR_IPV4}" \
--set-string "global.image.repository=ghcr.io/ovn-kubernetes/ovn-kubernetes/ovn-kube-ubuntu" \
--set-string "global.image.tag=master" \
--set-string "global.v4JoinSubnet=100.64.0.0/16" \
--set-string "global.v6JoinSubnet=fd98::/64" \
--set-string "global.v4MasqueradeSubnet=169.254.0.0/17" \
--set-string "global.v6MasqueradeSubnet=fd69::/112" \
--set-string "global.v4TransitSubnet=100.88.0.0/16" \
--set-string "global.v6TransitSubnet=fd97::/64" \
--set-string "global.egressIpHealthCheckPort=9107" \
--set-string "global.gatewayMode=shared" \
--set "global.enableMultiExternalGateway=true" \
--set "global.enableOvnKubeIdentity=true" \
--set "global.enablePersistentIPs=true" \
--set "global.enableEgressIp=true" \
--set "global.enableEgressService=true" \
--set "global.enableEgressFirewall=true" \
--set "global.enableEgressQos=true" \
--set "tags.ovnkube-control-plane=true" \
--set "tags.ovnkube-single-node-zone=true"
rm -rf "${temp_dir}"
if ! kubectl -n ovn-kubernetes rollout status daemonset/ovnkube-node --timeout=600s; then
echo "error: ovn-kubernetes node daemonset did not become ready"
run_kubectl -n ovn-kubernetes get pods -o wide || true
exit 1
fi
}
# Install MetalLB pinned to METALLB_VERSION, wait for its control plane,
# then configure an L2 address pool (METALLB_ADDRESS_POOL) via CRDs.
# Exits the script if the metallb-system pods never become Ready.
install_metallb() {
  run_kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v${METALLB_VERSION}/config/manifests/metallb-native.yaml
  run_kubectl -n metallb-system rollout status deploy/controller --timeout=300s
  run_kubectl -n metallb-system rollout status ds/speaker --timeout=300s
  # Apply instead of create to avoid errors when rerunning the script.
  kubectl create secret generic memberlist \
    --from-literal=secretkey="$(openssl rand -base64 128)" \
    --dry-run=client -n metallb-system -o yaml | kubectl apply -f -
  kubectl apply -f - <<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - ${METALLB_ADDRESS_POOL}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF
  # Grace period so pods exist before 'kubectl wait' selects them.
  sleep 15
  if ! kubectl wait -n metallb-system --for=condition=ready pods --all --timeout=300s ; then
    echo "some pods in the system are not running"
    run_kubectl get pods -A -o wide -n metallb-system || true
    exit 1
  fi
}
# Install Envoy Gateway from its pinned release manifest and wait for both
# the controller deployment and its pods; then create the default
# GatewayClass/Gateway. Exits the script on readiness timeout.
install_envoy_gateway() {
# Install Envoy Gateway via the published release manifest
local manifest_url="https://github.com/envoyproxy/gateway/releases/download/${ENVOY_GATEWAY_VERSION}/install.yaml"
# --server-side: the manifest's large CRDs exceed client-side apply limits
run_kubectl apply --server-side -f "${manifest_url}"
if ! kubectl -n envoy-gateway-system wait --for=condition=available deployment/envoy-gateway --timeout=300s; then
echo "error: Envoy Gateway controller failed to become ready"
run_kubectl -n envoy-gateway-system get pods || true
exit 1
fi
if ! kubectl -n envoy-gateway-system wait --for=condition=ready pods -l control-plane=envoy-gateway --timeout=300s; then
echo "error: Envoy Gateway pods did not become ready"
run_kubectl -n envoy-gateway-system get pods || true
exit 1
fi
cat <<EOF
info: Envoy Gateway installed with LoadBalancer services. Use MetalLB-assigned IPs via 'kubectl get svc -n envoy-gateway-system -l gateway.envoyproxy.io/managed-by=envoy-gateway -o wide'.
EOF
create_envoy_gatewayclass
}
# Register a GatewayClass bound to the Envoy Gateway controller plus a
# default Gateway with a catch-all HTTP listener on port 80 that accepts
# routes from all namespaces.
create_envoy_gatewayclass() {
  kubectl apply -f - <<EOF
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: envoy
spec:
  controllerName: gateway.envoyproxy.io/gatewayclass-controller
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: envoy
  namespace: envoy-gateway-system
spec:
  gatewayClassName: envoy
  listeners:
  - name: http
    protocol: HTTP
    port: 80
    allowedRoutes:
      namespaces:
        from: All
EOF
}
# Final gate: wait for every kube-system pod to report Ready; exits the
# script (after dumping pod state) if any pod misses the 300s deadline.
kubectl_wait_pods() {
# Gate that waits for kube-system pods to become Ready
# Grace period so freshly-created pods exist before 'kubectl wait' runs.
sleep 15
if ! kubectl wait -n kube-system --for=condition=ready pods --all --timeout=300s ; then
echo "some pods in the system are not running"
run_kubectl get pods -A -o wide -n kube-system || true
exit 1
fi
}
parse_args "$@"
# If user only wants to print versions, do it early and exit
if [[ "${PRINT_VERSIONS:-false}" == true ]]; then
print_detected_versions
exit 0
fi
# Perform privileged installs only if user is in sudo group
# NOTE(review): this checks membership in a group literally named "sudo";
# distros that use "wheel" (e.g. Fedora) will skip the CLI auto-install.
if id -nG | tr ' ' '\n' | grep -qx sudo; then
install_kind_cli
install_kubectl_cli
install_kubectx_cli
install_kubens_cli
install_kustomize_cli
install_helm_cli
fi
install_j2_renderer
set_default_params
print_params
validate_flags
preflight_checks
# Strict mode starts only here: earlier install/default steps tolerate
# failures; from now on any error aborts, and -x traces provisioning.
set -euxo pipefail
# Only generate the default config if using the default path
if [[ "${KIND_CONFIG}" == "${DEFAULT_KIND_CONFIG}" ]]; then
generate_kind_config
fi
create_kind_cluster
kubectl_scaledown_coredns
if [ "$KIND_ADD_CORPORATE_CA" == true ]; then
add_corporate_ca
fi
if [ "$KIND_INSTALL_CALICO" == true ]; then
install_calico
fi
if [ "$KIND_INSTALL_OVN" == true ]; then
install_ovn
# OVN setups forward CoreDNS to public resolvers to avoid host DNS quirks.
ensure_coredns_public_forward
fi
if [ "$KIND_INSTALL_METALLB" == true ]; then
install_metallb
fi
if [ "$KIND_INSTALL_ENVOY_GATEWAY" == true ]; then
install_envoy_gateway
fi
kubectl_wait_pods
# STS validation runs last so it sees a fully-ready cluster.
if [ "$KIND_ENABLE_AWSSTS" == true ]; then
enable_awssts
fi
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment