@kacole2
Created April 8, 2020 19:51
QuickStart for using the Cluster API provider for VMware vSphere (CAPI with CAPV)
#!/bin/bash
# CAPV Bootstrapping to deploy a management-cluster and a single workload cluster (with any number of workers) with Calico.
# This script has been tested on Ubuntu 18.04 with Cluster API 0.3, kind 0.7, and the Ubuntu and Photon v1.17.3 base images.
# This assumes there are DHCP addresses available for the clusters being deployed in the vSphere environment. Static IPs are not supported with this quickstart.
# A GITHUB TOKEN IS REQUIRED while this bug is being worked on. Get yours at https://github.com/settings/tokens
export GITHUB_TOKEN=<YOUR TOKEN>
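# Optional sketch (uncomment to use): fail fast if the token was not filled in above.
# if [[ -z "${GITHUB_TOKEN:-}" ]]; then
#   echo "GITHUB_TOKEN is not set. Get one at https://github.com/settings/tokens" >&2
#   exit 1
# fi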
# An SSH public key is needed. Generate one, then copy the public key into the prompt below:
# $ ssh-keygen -o
# $ cat ~/.ssh/id_rsa.pub
# STEP 1
# MAKE SURE YOU HAVE THE CLUSTERAPI TEMPLATE DEPLOYED TO vSphere BEFORE STARTING. USE THAT TEMPLATE NAME IN THE USER INPUTS WHEN PROMPTED.
# refer to https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/master/docs/getting_started.md#uploading-the-capv-machine-image
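# If the templates are not uploaded yet, the getting-started guide above uses govc. The
# lines below are an untested sketch: they assume govc is installed, GOVC_URL /
# GOVC_USERNAME / GOVC_PASSWORD are exported, the OVAs have been downloaded locally,
# and the names match the VM/HAProxy templates you enter when prompted.
# govc import.ova -name photon-3-kube-v1.17.3 ./photon-3-kube-v1.17.3.ova
# govc vm.markastemplate photon-3-kube-v1.17.3
# govc import.ova -name capv-haproxy-v0.6.3 ./capv-haproxy-v0.6.3.ova
# govc vm.markastemplate capv-haproxy-v0.6.3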
# STEP 2
# Copy this script into a file such as `capv.sh`, run `sudo chmod u+x capv.sh`, then execute with `sudo ./capv.sh`
# Prompt user for inputs to build the environment values
echo "Enter the following variables needed for deployment:\n\n"
read -p "vCenter Server IP or FQDN: " vcenter
if [[ -z "$vcenter" ]]; then
printf '%s\n' "Please start over. No input entered."
exit 1
fi
read -p "vCenter Username [administrator@vsphere.local]: " username
username=${username:-administrator@vsphere.local}
read -s -p "vCenter Password [VMware1!]: " pswd
pswd=${pswd:-VMware1!}
echo " "
read -p "vSphere Datacenter [Datacenter]: " datacenter
datacenter=${datacenter:-Datacenter}
read -p "vSphere Datastore [vsanDatastore]: " datastore
datastore=${datastore:-vsanDatastore}
read -p "vSphere VM Network [vms]: " network
network=${network:-vms}
read -p "vSphere Resource Pool [*/Resources]: " resourcepool
resourcepool=${resourcepool:-*/Resources}
if [[ $resourcepool == "*/Resources" ]]; then
  resourcepool='\"*/Resources\"'
fi
read -p "vSphere Folder [Discovered virtual machine]: " folder
folder=${folder:-Discovered virtual machine}
read -p "VM Template [photon-3-kube-v1.17.3]: " vmtemplate
vmtemplate=${vmtemplate:-photon-3-kube-v1.17.3}
read -p "HAProxy Template [capv-haproxy-v0.6.3]: " haproxytemplate
haproxytemplate=${haproxytemplate:-capv-haproxy-v0.6.3}
read -p "Public SSH Authorized Key (required): " publicssh
if [[ -z "$publicssh" ]]; then
printf '%s\n' "Please start over. No input entered."
exit 1
fi
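# Optional sketch: a loose format check on the pasted key (key types other than
# rsa/ed25519 would need the pattern extended); uncomment to enable.
# if [[ ! $publicssh =~ ^ssh-(rsa|ed25519)[[:space:]] ]]; then
#   echo "Warning: input does not look like an OpenSSH public key." >&2
# fi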
echo "Local Control Plane (Management Cluster) Configuration:"
while [[ ! $localcontrolplanereplicas =~ ^[0-9]+$ ]]; do
  read -p "How many control plane nodes in the Local Control Plane (Management Cluster)? [1]: " localcontrolplanereplicas
  localcontrolplanereplicas=${localcontrolplanereplicas:-1}
done
while [[ ! $localcontrolplaneworkers =~ ^[0-9]+$ ]]; do
  read -p "How many worker nodes in the Local Control Plane (Management Cluster)? [1]: " localcontrolplaneworkers
  localcontrolplaneworkers=${localcontrolplaneworkers:-1}
done
# Derive the Kubernetes version from the template name, e.g. photon-3-kube-v1.17.3 -> 1.17.3
k8sversion01=${vmtemplate#*-v}
read -p "Do you want to create a Kubernetes Workload Cluster [Y]: " ynworkloadcluster
ynworkloadcluster=${ynworkloadcluster:-Y}
if [[ $ynworkloadcluster == "Y" ]]; then
  echo "Kubernetes Workload Cluster Configuration:"
  while [[ ! $workloadcontrolplanereplicas =~ ^[0-9]+$ ]]; do
    read -p "How many control plane nodes? [1]: " workloadcontrolplanereplicas
    workloadcontrolplanereplicas=${workloadcontrolplanereplicas:-1}
  done
  while [[ ! $workloadworkerreplicas =~ ^[0-9]+$ ]]; do
    read -p "How many worker nodes? [1]: " workloadworkerreplicas
    workloadworkerreplicas=${workloadworkerreplicas:-1}
  done
  read -p "Kubernetes Version for Workload Cluster [$k8sversion01]: " k8sversion02
  k8sversion=${k8sversion02:-$k8sversion01}
  read -p "VM Storage Policy for Default StorageClass [vSAN Default Storage Policy]: " storagepolicy
  storagepolicy=${storagepolicy:-vSAN Default Storage Policy}
  read -p "Install Octant? [Y]: " octantinstall
  octantinstall=${octantinstall:-Y}
fi
#Install Latest Stable Docker Release
apt-get update -y
apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y
apt-get install -y docker-ce docker-ce-cli containerd.io
tee /etc/docker/daemon.json >/dev/null <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
groupadd -f docker
MAINUSER=$(logname)
usermod -aG docker $MAINUSER
systemctl daemon-reload
systemctl restart docker
echo "Docker Installation done"
#Install Latest Stable kubectl Release
curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
echo "kubectl Installation done"
#Install kind
KINDVERSION=$(curl -s https://github.com/kubernetes-sigs/kind/releases/latest/download 2>&1 | grep -Po '[0-9]+\.[0-9]+\.[0-9]+')
curl -L "https://github.com/kubernetes-sigs/kind/releases/download/v$KINDVERSION/kind-$(uname -s | tr '[:upper:]' '[:lower:]')-$(dpkg --print-architecture)" -o /usr/local/bin/kind
chmod +x /usr/local/bin/kind
echo "kind Installation done"
#Install Latest Stable clusterctl Release
CLUSTERCTLVERSION=$(curl -s https://github.com/kubernetes-sigs/cluster-api/releases/latest/download 2>&1 | grep -Po '[0-9]+\.[0-9]+\.[0-9]+')
curl -L "https://github.com/kubernetes-sigs/cluster-api/releases/download/v$CLUSTERCTLVERSION/clusterctl-$(uname -s | tr '[:upper:]' '[:lower:]')-$(dpkg --print-architecture)" -o /usr/local/bin/clusterctl
#BandAid Until it's released
#curl -L "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.0-rc.3/clusterctl-$(uname -s | tr '[:upper:]' '[:lower:]')-$(dpkg --print-architecture)" -o /usr/local/bin/clusterctl
chmod +x /usr/local/bin/clusterctl
echo "clusterctl Installation done"
# Create the environment variable file needed by clusterctl
mkdir -p ~/.cluster-api
tee ~/.cluster-api/clusterctl.yaml >/dev/null <<EOF
## -- Controller settings -- ##
VSPHERE_USERNAME: "$username" # The username used to access the remote vSphere endpoint
VSPHERE_PASSWORD: "$pswd" # The password used to access the remote vSphere endpoint
## -- Required workload cluster default settings -- ##
VSPHERE_SERVER: "$vcenter" # The vCenter server IP or FQDN
VSPHERE_DATACENTER: "$datacenter" # The vSphere datacenter to deploy the management cluster on
VSPHERE_DATASTORE: "$datastore" # The vSphere datastore to deploy the management cluster on
VSPHERE_NETWORK: "$network" # The VM network to deploy the management cluster on
VSPHERE_RESOURCE_POOL: "$resourcepool" # The vSphere resource pool for your VMs
VSPHERE_FOLDER: "$folder" # The VM folder for your VMs. Set to "" to use the root vSphere folder
VSPHERE_TEMPLATE: "$vmtemplate" # The VM template to use for your management cluster.
VSPHERE_HAPROXY_TEMPLATE: "$haproxytemplate" # The VM template to use for the HAProxy load balancer
VSPHERE_SSH_AUTHORIZED_KEY: "$publicssh" # The public ssh authorized key on all machines in this cluster. Set to "" if you don't want to enable SSH, or are using another solution.
EOF
echo "Configuration File done"
echo "Creating Boostrap Cluster with KIND"
kind create cluster --name bootstrap
echo "Adding ClusterAPI Components to Boostrap Cluster"
clusterctl init --infrastructure vsphere
echo "Waiting for bootstrap cluster to come online"
sleep 30
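# Alternative to the fixed sleep (a sketch, assuming the default clusterctl
# namespace/deployment names): poll until the CAPI controller is Available.
# kubectl wait --for=condition=Available --timeout=300s \
#   -n capi-system deployment/capi-controller-manager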
#Create the Local Control Plane / Management Cluster
mkdir -p ./local-control-plane
clusterctl config cluster local-control-plane \
  --infrastructure vsphere \
  --kubernetes-version v$k8sversion01 \
  --control-plane-machine-count $localcontrolplanereplicas \
  --worker-machine-count $localcontrolplaneworkers > ./local-control-plane/cluster.yaml
echo "Manifests for Local Control Plane have been created\n"
echo "Deploying Local Control Plane"
kubectl apply -f ./local-control-plane/cluster.yaml
# It doesn't usually take 7 minutes, but this is a safe waiting period.
# Snapshot the images to utilize LinkedClones
echo "Waiting 7 minutes for Local Control Plane to come online"
sleep 420
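# Alternative to the fixed sleep (a sketch): poll until the kubeconfig secret
# exists before reading it; uncomment to use.
# until kubectl get secret local-control-plane-kubeconfig >/dev/null 2>&1; do
#   echo "Still waiting for the kubeconfig secret..."
#   sleep 20
# done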
kubectl get secret local-control-plane-kubeconfig -o=jsonpath='{.data.value}' | \
  { base64 -d 2>/dev/null || base64 -D; } >./local-control-plane/kubeconfig
export KUBECONFIG="$(pwd)/local-control-plane/kubeconfig"
echo "Deploying Calico to Local Control Plane"
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
sleep 30
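# Optional sketch: confirm the CNI is actually up before moving on; calico.yaml
# creates the calico-node DaemonSet in kube-system.
# kubectl -n kube-system rollout status daemonset/calico-node --timeout=300s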
echo "Deploying ClusterAPI to Local Control Plane"
clusterctl init --infrastructure vsphere
echo "Moving ClusterAPI Objects from bootstrap to Local Control Plane"
sleep 60
unset KUBECONFIG
clusterctl move --to-kubeconfig="$(pwd)/local-control-plane/kubeconfig"
echo "Removing boostrap cluster"
kind delete cluster --name bootstrap
echo "Seting kubectl context to Local Control Plane"
cp $(pwd)/local-control-plane/kubeconfig $(pwd)/.kube/config
if [[ $ynworkloadcluster == "Y" ]]; then
  # Create the Workload Cluster
  mkdir -p ./workload-cluster
  clusterctl config cluster workload-cluster \
    --infrastructure vsphere \
    --kubernetes-version v$k8sversion \
    --control-plane-machine-count $workloadcontrolplanereplicas \
    --worker-machine-count $workloadworkerreplicas > ./workload-cluster/cluster.yaml
  echo -e "Manifests for Workload Cluster have been created\n"
  echo "Deploying Workload Cluster"
  kubectl apply -f ./workload-cluster/cluster.yaml
  echo "Waiting 12 minutes for Workload Cluster to come online"
  sleep 720
  kubectl get secret workload-cluster-kubeconfig -o=jsonpath='{.data.value}' | \
    { base64 -d 2>/dev/null || base64 -D; } >./workload-cluster/kubeconfig
  export KUBECONFIG="$(pwd)/workload-cluster/kubeconfig"
  echo "Deploying Calico to Workload Cluster"
  kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
  sleep 30
  kubectl get nodes -o wide
  # Create a default StorageClass to use persistent volumes in the workload cluster.
  tee defaultstorageclass.yaml >/dev/null <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.vsphere.vmware.com
parameters:
  storagepolicyname: "$storagepolicy"
EOF
  kubectl apply -f defaultstorageclass.yaml
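  # Optional sketch to verify the default StorageClass provisions volumes: create a
  # small test PVC (the name test-pvc is illustrative), watch it bind, then delete it.
  # tee test-pvc.yaml >/dev/null <<PVCEOF
  # kind: PersistentVolumeClaim
  # apiVersion: v1
  # metadata:
  #   name: test-pvc
  # spec:
  #   accessModes:
  #     - ReadWriteOnce
  #   resources:
  #     requests:
  #       storage: 1Gi
  # PVCEOF
  # kubectl apply -f test-pvc.yaml
  # kubectl get pvc test-pvc
  # kubectl delete -f test-pvc.yaml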
  # Setting ownership properties back to user for kubeconfig files
  chown -R $MAINUSER:$MAINUSER $(pwd)/workload-cluster
  # Install Latest Stable Octant Release
  if [[ $octantinstall == "Y" ]]; then
    OCTANTVERSION=$(curl -s https://github.com/vmware-tanzu/octant/releases/latest/download 2>&1 | grep -Po '[0-9]+\.[0-9]+\.[0-9]+')
    curl -L "https://github.com/vmware-tanzu/octant/releases/download/v${OCTANTVERSION}/octant_${OCTANTVERSION}_$(uname -s)-$(getconf LONG_BIT)bit.deb" -o octant.deb
    dpkg -i octant.deb
    octant --disable-open-browser --listener-addr 0.0.0.0:7777 --kubeconfig $(pwd)/workload-cluster/kubeconfig > /dev/null 2>&1 &
    echo "octant Installation done"
  fi
fi
# Setting ownership back to the main user for config files
chown -R $MAINUSER:$MAINUSER ~/.cluster-api
chown -R $MAINUSER:$MAINUSER $(pwd)/local-control-plane
chown -R $MAINUSER:$MAINUSER $(pwd)/.kube
echo -e "Kubernetes Workload Cluster Deployment is Complete! \n\n Use 'export KUBECONFIG=\"\$(pwd)/local-control-plane/kubeconfig\"' to interact with the Local Control Plane (Management Cluster) using kubectl."
if [[ $ynworkloadcluster == "Y" ]]; then
  echo -e "Use 'export KUBECONFIG=\"\$(pwd)/workload-cluster/kubeconfig\"' to interact with the workload cluster using kubectl. \nAccess the Octant UI at $(hostname -f):7777 or $(hostname -I|cut -d" " -f 1):7777"
fi