- exo CLI
- cs
- jq
exo -A prod privnet create --startip 10.240.0.1 --endip 10.240.0.253 --netmask 255.255.255.0 --zone ch-gva-2 kubernetes-the-hard-way
exo -A prod firewall create kubernetes-the-hard-way-allow-external
exo -A prod firewall add kubernetes-the-hard-way-allow-external --port 22 --protocol tcp --cidr 0.0.0.0/0
exo -A prod firewall add kubernetes-the-hard-way-allow-external --port 6443 --protocol tcp --cidr 0.0.0.0/0
exo -A prod firewall add kubernetes-the-hard-way-allow-external --protocol icmp --cidr 0.0.0.0/0
This EIP will be used for the load balancer:
exo -A prod eip create ch-gva-2 --description kubernetes-the-hard-way
Create the 3 controller instances:
exo -A prod vm create --security-group kubernetes-the-hard-way-allow-external --keypair "perso" --service-offering small --disk 25 --template "Linux Ubuntu 18.04 LTS 64-bit" --zone ch-gva-2 controller-0 controller-1 controller-2
Add them to the privnet and set a static IP for each of them:
exo -A prod privnet associate kubernetes-the-hard-way controller-0 10.240.0.10
exo -A prod privnet associate kubernetes-the-hard-way controller-1 10.240.0.11
exo -A prod privnet associate kubernetes-the-hard-way controller-2 10.240.0.12
Add to /etc/hosts:
10.240.0.20 worker-0
10.240.0.21 worker-1
10.240.0.22 worker-2
Create the 3 worker instances:
exo -A prod vm create --security-group kubernetes-the-hard-way-allow-external --keypair "perso" --service-offering small --disk 25 --template "Linux Ubuntu 18.04 LTS 64-bit" --zone ch-gva-2 worker-0 worker-1 worker-2
Add them to the privnet and set a static IP for each of them:
exo -A prod privnet associate kubernetes-the-hard-way worker-0 10.240.0.20
exo -A prod privnet associate kubernetes-the-hard-way worker-1 10.240.0.21
exo -A prod privnet associate kubernetes-the-hard-way worker-2 10.240.0.22
In `/etc/netplan/eth1.yaml`, add:
network:
  version: 2
  ethernets:
    eth1:
      dhcp4: true
Then run: sudo netplan apply
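To double-check that eth1 actually picked up a lease from the privnet DHCP, you can inspect the interface (a quick sanity check; the interface name eth1 matches the netplan file above):

# Show the IPv4 address assigned to eth1 on the private network
ip -4 addr show eth1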
Generate a certificate and private key for each Kubernetes worker node:
for instance in worker-0 worker-1 worker-2; do

cat > ${instance}-csr.json <<EOF
{
  "CN": "system:node:${instance}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')

NETWORK_ID=$(exo -A prod privnet show kubernetes-the-hard-way --output-format text --output-template '{{.ID}}')

VIRTUALMACHINE_ID=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.ID}}')

INTERNAL_IP=$(cs --region=prod listNics virtualmachineid=${VIRTUALMACHINE_ID} networkid=${NETWORK_ID} | jq .nic[0].ipaddress | tr -d '"')

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \
  -profile=kubernetes \
  ${instance}-csr.json | cfssljson -bare ${instance}

done
Generate the Kubernetes API Server certificate and private key:
KUBERNETES_PUBLIC_ADDRESS=$(exo -A prod eip list --output-format text --output-template '{{.IPAddress}} {{.Description}}' | grep "kubernetes-the-hard-way" | awk '{print $1}')

KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local

cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES} \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
Copy the appropriate certificates and private keys to each worker instance:
for instance in worker-0 worker-1 worker-2; do
  EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')
  scp ca.pem ${instance}-key.pem ${instance}.pem ubuntu@${EXTERNAL_IP}:~/
done
Copy the appropriate certificates and private keys to each controller instance:
for instance in controller-0 controller-1 controller-2; do
  EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')
  scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem ubuntu@${EXTERNAL_IP}:~/
done
This includes retrieving the EIP (the Kubernetes public IP address):
KUBERNETES_PUBLIC_ADDRESS=$(exo -A prod eip list --output-format text --output-template '{{.IPAddress}} {{.Description}}' | grep "kubernetes-the-hard-way" | awk '{print $1}')

for instance in worker-0 worker-1 worker-2; do
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
    --kubeconfig=${instance}.kubeconfig

  kubectl config set-credentials system:node:${instance} \
    --client-certificate=${instance}.pem \
    --client-key=${instance}-key.pem \
    --embed-certs=true \
    --kubeconfig=${instance}.kubeconfig

  kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:node:${instance} \
    --kubeconfig=${instance}.kubeconfig

  kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done
Copy the appropriate kubelet and kube-proxy kubeconfig files to each worker instance:
for instance in worker-0 worker-1 worker-2; do
  EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')
  scp ${instance}.kubeconfig kube-proxy.kubeconfig ubuntu@${EXTERNAL_IP}:~/
done
Copy the appropriate kube-controller-manager and kube-scheduler kubeconfig files to each controller instance:
for instance in controller-0 controller-1 controller-2; do
  EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')
  scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ubuntu@${EXTERNAL_IP}:~/
done
Copy the encryption-config.yaml encryption config file to each controller instance:
for instance in controller-0 controller-1 controller-2; do
  EXTERNAL_IP=$(exo -A prod vm show ${instance} --output-format text --output-template '{{.IPAddress}}')
  scp encryption-config.yaml ubuntu@${EXTERNAL_IP}:~/
done
I set the INTERNAL_IP variable manually: it should be the IP address of the instance's eth1 interface, i.e. its address on the private network.
INTERNAL_IP=$(cs --region=prod listNics virtualmachineid=${VIRTUALMACHINE_ID} networkid=${NETWORK_ID} | jq .nic[0].ipaddress | tr -d '"')
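If you prefer to read it directly on the node instead of going through cs, a minimal sketch (assuming the private NIC is eth1, as configured in the netplan file above):

# Grab the IPv4 address currently assigned to eth1
INTERNAL_IP=$(ip -4 -o addr show eth1 | awk '{print $4}' | cut -d/ -f1)
echo ${INTERNAL_IP}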
The nginx part of the original guide (Enable HTTP Health Checks) is not needed here, since health checking is done at the TCP level: the HAProxy backends use check, and the managed EIP probes TCP port 6443 directly (see below).
Custom section for Exoscale
Retrieve the EIP ID, create a new VM that will be our load balancer, turn the EIP into a managed EIP (with a health check), and attach it to the LB host.
KUBERNETES_PUBLIC_ADDRESS=$(exo -A prod eip list --output-format text --output-template '{{.IPAddress}} {{.Description}}' | grep "kubernetes-the-hard-way" | awk '{print $1}')

KUBERNETES_PUBLIC_ADDRESS_ID=$(exo -A prod eip list --output-format text --output-template '{{.ID}} {{.Description}}' | grep "kubernetes-the-hard-way" | awk '{print $1}')

exo -A prod vm create --security-group kubernetes-the-hard-way-allow-external --keypair "perso" --service-offering small --disk 100 --template "Linux Ubuntu 18.04 LTS 64-bit" --zone ch-gva-2 lb-k8s

exo -A prod privnet associate kubernetes-the-hard-way lb-k8s 10.240.0.30

exo -A prod eip update --healthcheck-interval 10 --healthcheck-mode tcp --healthcheck-port 6443 ${KUBERNETES_PUBLIC_ADDRESS_ID}

exo -A prod eip associate ${KUBERNETES_PUBLIC_ADDRESS} lb-k8s
exo -A prod ssh lb-k8s

sudo su -
dhclient eth1
apt-get update
apt-get -y install haproxy
Add this configuration at the end of the `/etc/haproxy/haproxy.cfg` file:
frontend frontend_k8s
    bind :6443
    mode tcp
    default_backend k8s_backend

backend k8s_backend
    balance roundrobin
    mode tcp
    option redispatch
    server controller-0 10.240.0.10:6443 check
    server controller-1 10.240.0.11:6443 check
    server controller-2 10.240.0.12:6443 check
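Before restarting, you can validate the configuration file and then restart the service (a small sketch, assuming the stock haproxy systemd unit shipped with Ubuntu 18.04):

# Validate the configuration file
haproxy -c -f /etc/haproxy/haproxy.cfg
# Restart haproxy to pick up the new frontend/backend
systemctl restart haproxy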
Once haproxy has been restarted, you should be able to curl the API with:
curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
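If you only want the version string, a small variation using jq (already part of the prerequisites); the /version endpoint returns a JSON object whose gitVersion field holds the Kubernetes version:

curl -s --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version | jq -r .gitVersion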
Retrieve the Pod CIDR range for the current compute instance:
The POD_CIDR is 10.200.${i}.0/24, where i is the index in the worker-${i} hostname (worker-0 gets 10.200.0.0/24, worker-1 gets 10.200.1.0/24, and so on); see the sketch below.
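A minimal sketch to derive it on each worker from its hostname (assuming the hostnames are exactly worker-0, worker-1, worker-2):

# Extract the trailing index from the hostname, e.g. worker-1 -> 1
i=$(hostname -s | awk -F'-' '{print $NF}')
POD_CIDR="10.200.${i}.0/24"
echo ${POD_CIDR}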
Each kubeconfig requires a Kubernetes API server to connect to. To support high availability, the IP address assigned to the external load balancer fronting the Kubernetes API servers will be used.
Generate a kubeconfig file suitable for authenticating as the admin user:
KUBERNETES_PUBLIC_ADDRESS=$(exo -A prod eip list --output-format text --output-template '{{.IPAddress}} {{.Description}}' | grep "kubernetes-the-hard-way" | awk '{print $1}')

kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443

kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem

kubectl config set-context kubernetes-the-hard-way \
  --cluster=kubernetes-the-hard-way \
  --user=admin

kubectl config use-context kubernetes-the-hard-way
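As a quick sanity check through the load balancer (assuming the control plane components are already deployed), you can query the cluster:

kubectl get componentstatuses
kubectl get nodes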
For all nodes (controllers and workers), add the routes in `/etc/netplan/eth1.yaml`:
network:
  version: 2
  ethernets:
    eth1:
      dhcp4: true
      routes:
        - to: 10.200.0.0/24
          via: 10.240.0.20
          metric: 100
        - to: 10.200.1.0/24
          via: 10.240.0.21
          metric: 100
        - to: 10.200.2.0/24
          via: 10.240.0.22
          metric: 100
Then, run `sudo netplan apply`.
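To confirm the pod routes are in place, list the routing table entries for the 10.200.0.0/16 ranges (a quick check, nothing Exoscale-specific):

# Each worker's pod CIDR should show up as a static route via its privnet IP
ip route show | grep 10.200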