Skip to content

Instantly share code, notes, and snippets.

@tdewin
Last active January 14, 2022 22:36
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 2 You must be signed in to fork a gist
  • Save tdewin/46d0c5e81481fe91f5c84184cb21e949 to your computer and use it in GitHub Desktop.
Kasten demo lab on ubuntu microk8s with openebs persistent volume (zfs based) and minio s3 storage
#!/usr/bin/bash
# Kasten demo lab bootstrap: prompts for an IP range + admin password,
# installs prerequisites and generates the htpasswd file used later for
# Kasten basicAuth and the mysql-demo ingress.
# first upload (might not work)
# make sure your vm has a second disk /dev/sdb that is completely empty (used for the zfs pool)
# you need some free ip range in your environment eg 192.168.0.40 is your server, then supply 192.168.0.41 for first and 192.168.0.49 for last if all the ips in between are free
ZFSDISK=/dev/sdb
ADMINNAME=admin
KASTENTOKENAUTH=0
# -b checks for an actual block device instead of parsing ls output;
# exit 1 (exit -1 is out of the 0-255 range and wraps to 255)
if [ ! -b "$ZFSDISK" ]; then echo "ZFSDisk $ZFSDISK not found" >&2; exit 1; fi
#buggy minios3 auth
#read -p "First IP Range LB:" FIRSTIP && read -p "LAST IP Range LB:" LASTIP && read -s -p "Password for user $ADMINNAME: " BASICAUTH && echo "" && read -s -p "Password for user minio (simple) $ADMINNAME: " S3AUTH && echo ""
read -p "First IP Range LB:" FIRSTIP && read -p "LAST IP Range LB:" LASTIP && read -s -p "Password for user $ADMINNAME: " BASICAUTH && echo ""
# S3AUTH is substituted into minios3.yaml further down but its prompt is
# disabled above; default to the placeholder value so the sed stays a no-op.
S3AUTH=${S3AUTH:-notsecure}
sudo apt-get update -y && sudo apt-get install jq apache2-utils -y
sudo apt-get update -y && sudo apt-get upgrade -y
# htpasswd -i reads the password from stdin; printf avoids echo's extra newline
printf '%s' "$BASICAUTH" | htpasswd -ic auth "$ADMINNAME"
# Cache sudo credentials up front so the piped downloads below are not
# interrupted by a password prompt mid-pipeline (idiomatic form of the
# old `sudo echo "forcing sudo"` trick).
sudo -v
# helm v3.5.3: stream the tarball through tar, then move the binary onto PATH
wget -c https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz -O - | tar -xz && \
sudo mv ./linux-amd64/helm /bin && \
sudo wget -c "https://dl.k8s.io/release/$(wget -qO - https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -O /bin/kubectl && \
sudo chmod 755 /bin/kubectl
# kubens / kubectx: namespace and context switching helpers
wget -c https://github.com/ahmetb/kubectx/releases/download/v0.9.3/kubens_v0.9.3_linux_x86_64.tar.gz -O - | tar -xz && \
sudo mv ./kubens /bin
wget -c https://github.com/ahmetb/kubectx/releases/download/v0.9.3/kubectx_v0.9.3_linux_x86_64.tar.gz -O - | tar -xz && \
sudo mv ./kubectx /bin
# microk8s 1.21 with dns, MetalLB (using the prompted IP range) and ingress
sudo snap install microk8s --classic --channel=1.21/stable
sudo microk8s.start
sudo microk8s.status
sudo microk8s enable dns #upstream will be 8.8.8.8 check doc for supplying custom (local) DNS
sudo microk8s enable metallb:"$FIRSTIP-$LASTIP" #machine ip physically 192.168.0.51
sudo microk8s enable ingress
sudo usermod -a -G microk8s "$(whoami)"
# create ~/.kube as the invoking user; -p makes reruns idempotent
# (the original `sudo mkdir` produced a root-owned dir and failed if it existed)
mkdir -p ~/.kube
sudo chown -f -R "$(whoami)" ~/.kube
# ZFS pool backing the OpenEBS zfs-localpv provisioner
sudo apt install zfsutils-linux -y
sudo zpool create zfspv-pool "$ZFSDISK"
##########################################################################################################################
#if you want to be sure, reboot and check if the zpool is autodiscovered
##########################################################################################################################
sudo zpool status
# you need to login to activate group activity, this way we kind of bypass that completely
sudo su "$(whoami)" -c "microk8s config > ~/.kube/config"
# use the absolute path — the original `chmod 600 .kube/config` only worked when cwd was $HOME
chmod 600 ~/.kube/config
KUBENODE=$(kubectl get node -o json | jq -r '.items[] | .metadata.name')
echo "################## Kubectl working on $KUBENODE"
#topology label for openebs
kubectl label node "$KUBENODE" openebs.io/rack=rack1
#some microk8s ninja editing: microk8s keeps kubelet state under /var/snap,
#so rewrite the kubelet path in the upstream operator manifest
wget https://openebs.github.io/charts/zfs-operator.yaml
# sed reads the file directly (no useless cat) and writes the patched copy
sed 's#/var/lib/kubelet/#/var/snap/microk8s/common/var/lib/kubelet/#g' zfs-operator.yaml > zfs-operator-microk8s.yaml
kubectl apply -f zfs-operator-microk8s.yaml
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: openebs-zfspv
annotations:
storageclass.kubernetes.io/is-default-class: "true"
parameters:
recordsize: "4k"
compression: "off"
dedup: "off"
fstype: "zfs"
poolname: "zfspv-pool"
provisioner: zfs.csi.openebs.io
EOF
#verify if zfspv is now the default
echo "################## Checking if class is available"
kubectl get sc
# VolumeSnapshotClass so Kasten (k10.kasten.io annotation) can snapshot ZFS PVs.
cat <<EOF | kubectl apply -f -
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1beta1
metadata:
  name: zfspv-snapclass
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
    k10.kasten.io/is-snapshot-class: "true"
driver: zfs.csi.openebs.io
deletionPolicy: Delete
EOF
#try out a volume: create a throwaway PVC, wait until it binds, then delete it
cat <<EOF | kubectl apply -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: csi-zfspv
spec:
  storageClassName: openebs-zfspv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi
EOF
# quote the substitution: while the PVC is Pending the phase can be empty,
# which would make the unquoted [ ... != "Bound" ] test a syntax error
while [ "$(kubectl get pvc csi-zfspv -o json | jq -r '.status.phase')" != "Bound" ]; do sleep 1; echo "waiting for bound status"; done
echo "################## This should output a pvc"
kubectl get pvc
sleep 1
kubectl delete $(kubectl get pvc -o name)
# Install Kasten K10; auth mode is selected by the KASTENTOKENAUTH flag
# set at the top of the script (0 = basicAuth with the generated htpasswd).
helm repo add kasten https://charts.kasten.io/
kubectl create namespace kasten-io
if [[ "$KASTENTOKENAUTH" -eq 1 ]]; then
  echo "################# CONFIGURING TOKENAUTH"
  helm install k10 kasten/k10 --namespace=kasten-io --set externalGateway.create=true --set auth.tokenAuth.enabled=true
  kubectl create serviceaccount login-sa --namespace kasten-io
else
  echo "################# CONFIGURING BASICAUTH"
  helm install k10 kasten/k10 --namespace=kasten-io --set externalGateway.create=true --set auth.basicAuth.enabled=true \
    --set auth.basicAuth.htpasswd="$(cat auth)"
fi
#helm uninstall k10 --namespace=kasten-io
#get your service ip
echo "################## Kasten should be online"
kubectl get svc -n kasten-io gateway-ext
#adding minios3
#admin:notsecure
# MinIO in its own namespace: LoadBalancer service (API on 80, console on 9001),
# a Secret holding the root password (base64 of "notsecure", replaced below),
# and a single-replica StatefulSet with a 10Gi PVC from the default class.
# Quoted 'EOF' so nothing is expanded while writing the manifest.
cat <<'EOF' > ~/minios3.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: minios3
---
apiVersion: v1
kind: Service
metadata:
  name: minios3
  labels:
    app: minios3
  namespace: minios3
spec:
  ports:
  - port: 80
    targetPort: 80
    name: minios3api
  - port: 9001
    targetPort: 9001
    name: minios3console
  type: LoadBalancer
  selector:
    app: minios3
---
apiVersion: v1
kind: Secret
metadata:
  name: minio-secret
  namespace: minios3
type: Opaque
data:
  minio-root-password: bm90c2VjdXJl
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minios3
  namespace: minios3
spec:
  serviceName: "minios3"
  replicas: 1
  selector:
    matchLabels:
      app: minios3
  template:
    metadata:
      labels:
        app: minios3
    spec:
      containers:
      - name: minios3
        image: minio/minio
        args: ["server","/data","--address",":80","--console-address",":9001"]
        env:
        # - name: MINIO_ROOT_USER
        #   value: admin
        # - name: MINIO_REGION_NAME
        #   value: us-east-1
        # - name: MINIO_ROOT_PASSWORD
        #   valueFrom:
        #     secretKeyRef:
        #       key: minio-root-password
        #       name: minio-secret
        ports:
        - containerPort: 80
          name: minios3api
        - containerPort: 9001
          name: minios3console
        volumeMounts:
        - name: data
          mountPath: /data
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
EOF
# printf (not echo) so the base64 value carries no trailing newline
sed -i "s/bm90c2VjdXJl/$(printf '%s' "$S3AUTH" | base64)/" ~/minios3.yaml
kubectl apply -f ~/minios3.yaml
sleep 15
kubectl -n minios3 wait --for=condition=ready pod/minios3-0 --timeout=180s
# the pod lives in the minios3 namespace, not default
kubectl -n minios3 logs minios3-0
echo "################## Minio should be online"
kubectl get svc -n minios3
# Demo workload: bitnami mysql plus the mysql-employees sample app manifests.
helm repo add bitnami https://charts.bitnami.com/bitnami
kubectl create namespace mysql-demo
helm install mysql-demo bitnami/mysql --namespace=mysql-demo
for manifest in configmap deployment svc; do
  kubectl -n mysql-demo apply -f "https://raw.githubusercontent.com/tdewin/mysql-employees/main/${manifest}.yaml"
done
sleep 15
kubectl -n mysql-demo wait --for=condition=ready pod/mysql-demo-0 --timeout=180s
kubectl -n mysql-demo get pod
kubectl -n mysql-demo apply -f https://raw.githubusercontent.com/tdewin/mysql-employees/main/initjob.yaml
# reuse the htpasswd file generated at the top for the ingress basic auth
kubectl create secret generic basic-auth --from-file=auth -n mysql-demo
echo "################## Fake application should be online"
kubectl -n mysql-demo get svc
# Ingress with nginx basic auth in front of the employees app.
# NOTE(review): this Secret overwrites the basic-auth secret created from the
# htpasswd file just above (hardcoded value decodes to an admin credential).
# networking.k8s.io/v1beta1 is kept on purpose: the serviceName/servicePort
# backend form below is v1beta1-only and k8s 1.21 still serves it.
cat << 'EOF' | kubectl -n mysql-demo apply -f -
apiVersion: v1
data:
  auth: YWRtaW46JGFwcjEkdXNPbWV3MlIkUTZsNklnMUVVZml1a3diVHYuTGJ1Lgo=
kind: Secret
metadata:
  name: basic-auth
type: Opaque
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: mysql-employees-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo'
spec:
  rules:
  - http:
      paths:
      - path: /employees(/|$)(.*)
        pathType: Prefix
        backend:
          serviceName: mysql-employees-svc
          servicePort: 80
EOF
sleep 15
echo "################## Trying out ingress"
kubectl get ingress -n mysql-demo
# kubernetes dashboard
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
kubectl create ns kubernetes-dashboard
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard -n kubernetes-dashboard --set=service.type=LoadBalancer
# Helper script to print the dashboard login token.
# Fixes vs original: shebang added (the file is chmod +x'd), jsonpath and
# variable quoted, and $TOKEN no longer used as a printf format string.
cat << 'EOF' > ~/getdashboardtoken.sh
#!/usr/bin/bash
SECRETNAME=$(kubectl get sa -n kubernetes-dashboard kubernetes-dashboard -o jsonpath='{.secrets[0].name}')
TOKEN=$(kubectl get secret -n kubernetes-dashboard "$SECRETNAME" -o jsonpath='{.data.token}' | base64 -d)
printf '\n%s\n' "$TOKEN"
EOF
chmod +x ~/getdashboardtoken.sh
echo "##### Login"
#buggy access
#echo "Minio : admin and password you supplied / Internal DNS http://minios3.minios3 region us-east-1 bucket minios3 "
echo "Minio : minioadmin:minioadmin / Internal DNS http://minios3.minios3 "
echo "Kasten : admin and password you supplied"
echo "Use ~/getdashboardtoken.sh to get token for kubernetes dashboard"
echo "################## Showing IPs for load balancers"
kubectl get svc -A | grep LoadBalancer
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment