Run minikube with Cilium CNI for testing

System requirements:

  • OS: Ubuntu 18.04+
  • CPU: 6+ cores
  • RAM: 10+ GB

Install minikube + cilium

# Run OS preparation
curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/prepare_system.sh | bash -s --
# Run minikube with API server
minikube start --driver=none --cni=cilium --alsologtostderr --apiserver-ips=$(ip route get 8.8.8.8  | awk ' /^[0-9]/ { print $7 }'),$(curl ifconfig.me)

Check the cluster:

root@minikube:~# kubectl get node
NAME       STATUS   ROLES    AGE    VERSION
minikube   Ready    master   8m8s   v1.19.2

root@minikube:~# kubectl get pod -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
cilium-jtbxn                       1/1     Running   0          7m56s
cilium-operator-7c755f4594-dg7sj   1/1     Running   0          7m56s
coredns-f9fd979d6-q6ps6            1/1     Running   0          7m56s
etcd-minikube                      1/1     Running   0          8m13s
kube-apiserver-minikube            1/1     Running   0          8m13s
kube-controller-manager-minikube   1/1     Running   0          8m12s
kube-proxy-7fjz6                   1/1     Running   0          7m56s
kube-scheduler-minikube            1/1     Running   0          8m12s
storage-provisioner                1/1     Running   0          8m12s
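
If the node or a kube-system pod stays NotReady, the Cilium agent's own health report is a useful first check (same exec pattern as used further below):

kubectl -n kube-system exec $(kubectl get pods -n kube-system -l "k8s-app=cilium" -o jsonpath='{.items[0].metadata.name}') -- cilium status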

Configure host DNS to use kube-dns

curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/configure_dns.sh | bash -s --
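
A quick sanity check that the host now resolves cluster-internal names through kube-dns (any in-cluster service name works here):

getent hosts kubernetes.default.svc.cluster.local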

Install Helm 3 and Fluxctl

sudo snap install helm --classic
sudo snap install fluxctl --classic

Bash completion

echo 'source <(kubectl completion bash)' >>~/.bashrc
source ~/.bashrc

Install ingress

curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/install_ingress.sh | bash -s --
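
Check that the controller came up (the namespace below is the default from the ingress-nginx manifest used in the script):

kubectl get pods,svc -n ingress-nginx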

Install cert-manager

curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/install_cert-manager.sh | bash -s --

Get kube config

curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/get_kube_config.sh | bash -s --
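
The script writes the generated config to ~/k8s-minikube.conf; to use it from another machine, copy it over and point kubectl at it:

kubectl --kubeconfig ~/k8s-minikube.conf get nodes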

Install Hubble

Documentation: https://docs.cilium.io/en/v1.8/gettingstarted/hubble/

curl -s https://gist.githubusercontent.com/velp/151636fe01d8b8e3f9c626b30e3c2bc5/raw/install_hubble.sh | bash -s --
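
The script installs the hubble CLI and prints the NodePort URL of the Hubble UI. A hedged CLI example for tailing flows (the --server address is an assumption; point it at wherever Hubble's gRPC endpoint is reachable in your setup, e.g. through a port-forward):

hubble observe --server localhost:4245 --follow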

Test the Cassandra proxy parser

Documentation: https://docs.cilium.io/en/v1.8/gettingstarted/cassandra/

Run the Cassandra server

kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/examples/kubernetes-cassandra/cass-sw-app.yaml
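
Wait for the pods to come up before continuing (the Cassandra server plus the empire-hq and empire-outpost client pods from the example manifest):

kubectl get pods -o wide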

Create CiliumNetworkPolicy

kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/examples/kubernetes-cassandra/cass-sw-security-policy.yaml
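
The applied file restricts which keyspaces and tables each client may touch. The fragment below is only an illustrative sketch of the rule shape used in the Cilium Cassandra guide (the name and selector labels are assumptions); see the applied file for the authoritative version:

apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
  name: secure-empire-cassandra-example   # illustrative name, not taken from the applied file
spec:
  endpointSelector:
    matchLabels:
      app: cass-server                    # assumed label of the Cassandra server pods
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: empire-outpost
    toPorts:
    - ports:
      - port: "9042"
        protocol: TCP
      rules:
        l7proto: cassandra
        l7:
        # outposts may only write attendance records, so SELECTs on other keyspaces are rejected
        - query_action: "insert"
          query_table: "attendance.daily_records"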

Check access from the outpost pod

$ kubectl exec -it $(kubectl get pods -l app=empire-outpost -o jsonpath='{.items[0].metadata.name}') -- cqlsh cassandra-svc
cqlsh> SELECT * FROM deathstar.scrum_notes;
Unauthorized: Error from server: code=2100 [Unauthorized] message="Request Unauthorized"

Check access from the HQ pod

$ kubectl exec -it $(kubectl get pods -l app=empire-hq -o jsonpath='{.items[0].metadata.name}') -- cqlsh cassandra-svc
cqlsh> SELECT * FROM attendance.daily_records;
 loc_id                               | creation                             | empire_member_id                     | present
--------------------------------------+--------------------------------------+--------------------------------------+---------
a855e745-69d8-4159-b8b6-e2bafed8387a | c692ce90-bf57-11e8-98e6-f1a9f45fc4d8 | cee6d956-dbeb-4b09-ad21-1dd93290fa6c |    True

<snip>

(12 rows)

Test HTTP network policy

Documentation: https://docs.cilium.io/en/v1.8/gettingstarted/http/ and https://docs.cilium.io/en/latest/policy/visibility/

Run test apps

kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/examples/minikube/http-sw-app.yaml
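
This creates the deathstar deployment and service plus the tiefighter and xwing client pods; make sure everything is Running:

kubectl get pods,svc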

Check current Cilium policies

# kubectl -n kube-system exec $(kubectl get pods -n kube-system -l "k8s-app=cilium" -o jsonpath='{.items[0].metadata.name}') -- cilium endpoint list | grep "k8s:org" -B4
25         Disabled           Disabled          7670       k8s:class=xwing                                               10.0.0.141   ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=alliance
--
44         Disabled           Disabled          8407       k8s:class=tiefighter                                          10.0.0.4     ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire
--
1625       Disabled           Disabled          1061       k8s:class=deathstar                                           10.0.0.5     ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire
--
3144       Disabled           Disabled          1061       k8s:class=deathstar                                           10.0.0.126   ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire

Create policy

kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/examples/minikube/sw_l3_l4_l7_policy.yaml
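
The policy only allows empire-labelled pods to reach the deathstar on port 80 and, at L7, only POST /v1/request-landing. An illustrative sketch of the rule shape (see the applied file for the authoritative version):

apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
  name: rule1
spec:
  endpointSelector:
    matchLabels:
      org: empire
      class: deathstar
  ingress:
  - fromEndpoints:
    - matchLabels:
        org: empire
    toPorts:
    - ports:
      - port: "80"
        protocol: TCP
      rules:
        http:
        # only the landing request is allowed; everything else (e.g. /v1/exhaust-port) gets 403 from the proxy
        - method: "POST"
          path: "/v1/request-landing"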

Check Cilium policies again

# kubectl -n kube-system exec $(kubectl get pods -n kube-system -l "k8s-app=cilium" -o jsonpath='{.items[0].metadata.name}') -- cilium endpoint list | grep "k8s:org" -B4
25         Disabled           Disabled          7670       k8s:class=xwing                                               10.0.0.141   ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=alliance
--
44         Disabled           Disabled          8407       k8s:class=tiefighter                                          10.0.0.4     ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire
--
1625       Enabled            Disabled          1061       k8s:class=deathstar                                           10.0.0.5     ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire
--
3144       Enabled            Disabled          1061       k8s:class=deathstar                                           10.0.0.126   ready
                                                           k8s:io.cilium.k8s.policy.cluster=default
                                                           k8s:io.cilium.k8s.policy.serviceaccount=default
                                                           k8s:io.kubernetes.pod.namespace=default
                                                           k8s:org=empire

Add proxy-visibility annotations

kubectl annotate pod tiefighter io.cilium.proxy-visibility="<Egress/53/UDP/DNS>,<Egress/80/TCP/HTTP>"
kubectl annotate pod xwing io.cilium.proxy-visibility="<Egress/53/UDP/DNS>,<Egress/80/TCP/HTTP>"
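
With the annotations in place, L7 request metadata becomes visible in the datapath; one way to watch it is the agent's monitor (same exec pattern as above):

kubectl -n kube-system exec $(kubectl get pods -n kube-system -l "k8s-app=cilium" -o jsonpath='{.items[0].metadata.name}') -- cilium monitor --type l7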

Check policies

# allowed L4 request
$ kubectl exec tiefighter -- curl -m 5 -s -v -XPOST deathstar.default.svc.cluster.local/v1/request-landing
*   Trying 10.107.91.73...
* TCP_NODELAY set
* Connected to deathstar.default.svc.cluster.local (10.107.91.73) port 80 (#0)
> POST /v1/request-landing HTTP/1.1
> Host: deathstar.default.svc.cluster.local
> User-Agent: curl/7.52.1
> Accept: */*
>
Ship landed
< HTTP/1.1 200 OK
< Content-Type: text/plain
< Date: Tue, 13 Oct 2020 19:46:28 GMT
< Content-Length: 12
<
{ [12 bytes data]
* Curl_http_done: called premature == 0
* Connection #0 to host deathstar.default.svc.cluster.local left intact

# denied L4 request
$ kubectl exec xwing -- curl -m 5 -s -v -XPOST deathstar.default.svc.cluster.local/v1/request-landing
*   Trying 10.107.91.73...
* TCP_NODELAY set
* Connection timed out after 5000 milliseconds
* Curl_http_done: called premature == 1
* Closing connection 0
command terminated with exit code 28

# denied L7 request
$ kubectl exec tiefighter -- curl -m 5 -s -v -XPOST deathstar.default.svc.cluster.local/v1/exhaust-port
*   Trying 10.107.91.73...
* TCP_NODELAY set
* Connected to deathstar.default.svc.cluster.local (10.107.91.73) port 80 (#0)
> POST /v1/exhaust-port HTTP/1.1
> Host: deathstar.default.svc.cluster.local
> User-Agent: curl/7.52.1
> Accept: */*
>
Access denied
< HTTP/1.1 403 Forbidden
< content-length: 15
< content-type: text/plain
< date: Tue, 13 Oct 2020 19:52:44 GMT
< server: envoy
<
{ [15 bytes data]
* Curl_http_done: called premature == 0
* Connection #0 to host deathstar.default.svc.cluster.local left intact

Links

  1. https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-default/
  2. https://docs.cilium.io/en/v1.8/concepts/security/proxy/envoy/
  3. https://cilium.io/blog/2018/10/23/cilium-13-envoy-go
  4. https://docs.cilium.io/en/v1.8/gettingstarted/cassandra/
  5. https://docs.cilium.io/en/v1.8/gettingstarted/http/
  6. https://docs.cilium.io/en/v1.8/gettingstarted/hubble/
  7. https://docs.cilium.io/en/latest/policy/visibility/#proxy-visibility
  8. https://docs.cilium.io/en/stable/configuration/metrics/
#!/bin/bash
# configure_dns.sh: point the host's DNS resolution at the cluster's kube-dns service
KUBE_DNS_IP=$(kubectl get svc -n kube-system -l "k8s-app=kube-dns" -o jsonpath='{.items[0].spec.clusterIP}')
PUBLIC_INTERFACE=$(ip route get 8.8.8.8 | awk ' /^[0-9]/ { print $5 }')
IP_MASK_PUBLIC_INTERFACE=$(ip -o -f inet addr show dev $PUBLIC_INTERFACE | awk '/scope global/ {print $4}')
DEFAULT_GATEWAY=$(ip route get 8.8.8.8 | awk ' /^[0-9]/ { print $3 }')
echo "Create configuration files"
cat << EOF > /etc/systemd/network/10-wired.network
[Match]
Name=$PUBLIC_INTERFACE
[Network]
Address=$IP_MASK_PUBLIC_INTERFACE
Gateway=$DEFAULT_GATEWAY
DNS=$KUBE_DNS_IP
EOF
cat << EOF > /etc/systemd/resolved.conf
[Resolve]
DNS=$KUBE_DNS_IP
EOF
echo "Reload services"
systemctl daemon-reload
systemctl restart systemd-networkd
systemctl restart systemd-resolved
echo "Create resolv.conf symlink"
rm -rf /etc/resolv.conf
ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
#!/bin/bash
# get_kube_config.sh: create an admin service account and write a kubeconfig to ~/k8s-minikube.conf
USERNAME="admin"
NAMESPACE="kube-system"
SERVER="https://$(curl ifconfig.me):8443"
echo "Creating service account for admin..."
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${USERNAME}
  namespace: ${NAMESPACE}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ${USERNAME}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: ${USERNAME}
  namespace: ${NAMESPACE}
EOF
TOKEN_SECRET=$(kubectl -n kube-system get secret | grep ${USERNAME} | awk '{print $1}')
CA=$(kubectl get secret -n ${NAMESPACE} ${TOKEN_SECRET} -o jsonpath='{.data.ca\.crt}')
TOKEN=$(kubectl get secret -n ${NAMESPACE} ${TOKEN_SECRET} -o jsonpath='{.data.token}' | base64 --decode)
echo "
apiVersion: v1
kind: Config
clusters:
- name: default-cluster
cluster:
certificate-authority-data: ${CA}
server: ${SERVER}
contexts:
- name: default-context
context:
cluster: default-cluster
namespace: default
user: default-user
current-context: default-context
users:
- name: default-user
user:
token: ${TOKEN}
" > ~/k8s-minikube.conf
echo "Kube config saved to: ~/k8s-minikube.conf"
#!/bin/bash
# install_cert-manager.sh: install cert-manager with Helm, run a self-signed test, and create Let's Encrypt ClusterIssuers
echo "Create cert-manager namespace"
kubectl create ns cert-manager
echo "Install cert-manager with helm 3"
helm repo add jetstack https://charts.jetstack.io
helm install cert-manager \
  jetstack/cert-manager \
  --namespace cert-manager \
  --version v1.0.4 \
  --set installCRDs=true
#echo "Install cert-manager without helm"
#kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml
echo "Create test cert-manager"
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: cert-manager-test
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: test-selfsigned
  namespace: cert-manager-test
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: selfsigned-cert
  namespace: cert-manager-test
spec:
  dnsNames:
  - example.com
  secretName: selfsigned-cert-tls
  issuerRef:
    name: test-selfsigned
EOF
echo "Check the status of the newly created certificate"
kubectl wait --for=condition=ready certificate -n cert-manager-test selfsigned-cert
echo "Cleanup test namespace"
kubectl delete ns cert-manager-test
echo "Install kubectl plugin for cert-manager"
curl -L -o kubectl-cert-manager.tar.gz https://github.com/jetstack/cert-manager/releases/download/v1.0.4/kubectl-cert_manager-linux-amd64.tar.gz
tar xzf kubectl-cert-manager.tar.gz
sudo mv kubectl-cert_manager /usr/local/bin
echo "Create ClusterIssuers for email velizarx@gmail.com"
cat <<EOF | kubectl apply -f -
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: velizarx@gmail.com
    privateKeySecretRef:
      name: letsencrypt-staging
    solvers:
    - http01:
        ingress:
          class: nginx
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: velizarx@gmail.com
    privateKeySecretRef:
      name: letsencrypt-prod
    solvers:
    - http01:
        ingress:
          class: nginx
EOF
echo "Check the status of the newly created issuers"
kubectl wait --for=condition=ready clusterissuer letsencrypt-prod
kubectl wait --for=condition=ready clusterissuer letsencrypt-staging
#!/bin/bash
# install_hubble.sh: install Hubble, expose the UI via NodePort, and install the hubble CLI
echo "Install Hubble"
kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/experimental-install.yaml
echo "Publish Hubble via NodePort"
CURRENT_PORT_TYPE=$(kubectl get svc -n kube-system hubble-ui -o jsonpath='{.spec.type}')
if [ "$CURRENT_PORT_TYPE" != "NodePort" ]; then
kubectl delete svc -n kube-system hubble-ui
kubectl expose deployment -n kube-system hubble-ui --type=NodePort --port=12000
fi
echo "Install latest Hubble CLI"
HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
curl -LO "https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz"
curl -LO "https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz.sha256sum"
sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
tar zxf hubble-linux-amd64.tar.gz
sudo mv hubble /usr/local/bin
PUBLIC_IP=$(curl ifconfig.me)
HUBBLE_NODE_PORT=$(kubectl get svc -n kube-system hubble-ui -o jsonpath='{.spec.ports[0].nodePort}')
echo "Hubble exposed on: http://${PUBLIC_IP}:${HUBBLE_NODE_PORT}"
#!/bin/bash
# install_ingress.sh: install ingress-nginx (NodePort) and front it with HAProxy on the public IP
# Install ingress controller with NodePort service
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.40.2/deploy/static/provider/baremetal/deploy.yaml
# Find port and ip address for Haproxy config
PUBLIC_IP=$(ip route get 8.8.8.8 | awk ' /^[0-9]/ { print $7 }')
INGRESS_HTTP_PORT=$(kubectl get svc -n ingress-nginx ingress-nginx-controller -o json | jq -r '.spec.ports[] | select(.name == "http") | .nodePort')
INGRESS_HTTPS_PORT=$(kubectl get svc -n ingress-nginx ingress-nginx-controller -o json | jq -r '.spec.ports[] | select(.name == "https") | .nodePort')
# Install Haproxy
sudo add-apt-repository ppa:vbernat/haproxy-2.1 --yes
sudo apt-get install haproxy -y
# Configure Haproxy
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log /dev/log local0
    log /dev/log local1 notice
    user haproxy
    group haproxy
    maxconn 1000000
    daemon

defaults
    log global
    mode tcp
    option tcplog
    option dontlognull
    timeout connect 10s
    timeout client 1m
    timeout server 1m

frontend http_frontend
    bind ${PUBLIC_IP}:80
    default_backend http_backend

backend http_backend
    server ingress_http ${PUBLIC_IP}:${INGRESS_HTTP_PORT}

frontend https_frontend
    bind ${PUBLIC_IP}:443
    default_backend https_backend

backend https_backend
    server ingress_https ${PUBLIC_IP}:${INGRESS_HTTPS_PORT}
EOF
# Reload Haproxy
systemctl reload haproxy
#!/bin/bash
# prepare_system.sh: install Docker, kubectl, and minikube, and prepare the host OS for a --driver=none cluster
set -x
echo "Preparing docker on a host (https://kubernetes.io/docs/setup/production-environment/container-runtimes/)"
# (Install Docker CE)
## Set up the repository:
### Install packages to allow apt to use a repository over HTTPS
sudo apt-get update && sudo apt-get install -y \
  apt-transport-https ca-certificates curl software-properties-common gnupg2 jq nfs-kernel-server
# Add Docker's official GPG key:
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Add the Docker apt repository:
sudo add-apt-repository \
  "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) \
  stable"
# Install Docker CE
sudo apt-get update && sudo apt-get install -y \
  containerd.io=1.2.13-2 \
  docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \
  docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs)
# Set up the Docker daemon
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo mkdir -p /etc/systemd/system/docker.service.d
# Restart Docker
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl enable docker
docker version
echo "Preparing dependencies for Minikube and Kubernetes"
sudo apt-get install -y socat ebtables conntrack
echo "Preparing K8S runtime (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)"
sudo modprobe overlay
sudo modprobe br_netfilter
# Set up required sysctl params, these persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system
echo "Preparing kubectl v1.19.2 on a host (https://kubernetes.io/docs/tasks/tools/install-kubectl/)"
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.2/bin/linux/amd64/kubectl
sudo mv kubectl /usr/local/bin/kubectl
sudo chmod a+x /usr/local/bin/kubectl
kubectl version
echo "Preparing minikube v1.13.1 on a host (https://kubernetes.io/ru/docs/tasks/tools/install-minikube/)"
curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.13.1/minikube-linux-amd64
sudo chmod +x minikube
sudo mv minikube /usr/local/bin/minikube
minikube version
echo "Preparing BPF filesystem"
sudo mount bpffs -t bpf /sys/fs/bpf
# Create directory for manual k8s volumes
mkdir -p /mnt/volumes
# # Create minikube user
# useradd -G sudo,docker -m -d /home/minikube -s /bin/bash minikube
# echo "minikube ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers