Don't use snap on Ubuntu! It will be hard to get help and diagnose issues, because it's not a widely used environment.
Taken from https://docs.docker.com/engine/install/ubuntu/
sudo su
apt-get update
-
apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install docker-ce docker-ce-cli containerd.io
docker run hello-world
Taken from https://docs.docker.com/compose/install/ Replace version 1.29.2 with the latest version.
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose --version
-
service docker stop
-
vim /etc/hosts
Add an entry with the IP address & hostname to use for your registry.
-
vim /etc/docker/daemon.json
{ "insecure-registries": ["docker-registry-hostname:5000"] }
-
service docker start
-
mkdir docker-registry
-
cd docker-registry
-
mkdir volume
-
vim docker-registry.yaml
version: '3.8'
services:
  docker-registry:
    image: registry:2
    container_name: docker-registry
    ports:
      - 5000:5000
    restart: always
    volumes:
      - ./volume:/var/lib/registry
  docker-registry-ui:
    image: konradkleine/docker-registry-frontend:v2
    container_name: docker-registry-ui
    ports:
      - 5001:80
    environment:
      ENV_DOCKER_REGISTRY_HOST: docker-registry
      ENV_DOCKER_REGISTRY_PORT: 5000
-
docker-compose -f docker-registry.yaml up -d
- Get Minikube from here: https://minikube.sigs.k8s.io/docs/start/
minikube start
kubectl get po -A
Mongo DB example from: https://www.youtube.com/watch?v=EQNO_kM96Mo
- Create the config map
apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-configmap
data:
  database_url: mongodb-service
kubectl apply -f mongodb-configmap.yaml
- Create the secret (base64-encode the values)
apiVersion: v1
kind: Secret
metadata:
  name: mongodb-secret
type: Opaque
data:
  mongo-root-username: cm9vdA==     # base64 of "root"
  mongo-root-password: cGFzc3dvcmQ= # base64 of "password"
kubectl apply -f mongodb-secret.yaml
- Create the database deployment and service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongodb-deployment
  labels:
    app: mongodb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mongodb
  template:
    metadata:
      labels:
        app: mongodb
    spec:
      containers:
        - name: mongodb
          image: mongo
          ports:
            - containerPort: 27017
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: mongo-root-username
            - name: MONGO_INITDB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: mongo-root-password
---
apiVersion: v1
kind: Service
metadata:
  name: mongodb-service
spec:
  selector:
    app: mongodb
  ports:
    - protocol: TCP
      port: 27017
      targetPort: 27017
kubectl apply -f mongodb-deployment.yaml
- Create the mongo-express deployment and service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongo-express
  labels:
    app: mongo-express
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mongo-express
  template:
    metadata:
      labels:
        app: mongo-express
    spec:
      containers:
        - name: mongo-express
          image: mongo-express
          ports:
            - containerPort: 8081
          env:
            - name: ME_CONFIG_MONGODB_ADMINUSERNAME
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: mongo-root-username
            - name: ME_CONFIG_MONGODB_ADMINPASSWORD
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: mongo-root-password
            - name: ME_CONFIG_MONGODB_SERVER
              valueFrom:
                configMapKeyRef:
                  name: mongodb-configmap
                  key: database_url
---
apiVersion: v1
kind: Service
metadata:
  name: mongo-express-service
spec:
  selector:
    app: mongo-express
  # LoadBalancer is specific to the node environment (AWS, Minikube etc) but also works on bare metal.
  # It is easy to use HAProxy and load balance traffic to each pod IP address (plus, maybe, Kubernetes Ingress: https://www.youtube.com/watch?v=chwofyGr80c ??)
  type: LoadBalancer
  # Without an external load balancer, you can (but I feel that maybe you shouldn't) hard-configure all of your master and nodes' physical IP addresses like so:
  externalIPs:
    - 192.168.1.169
    - 192.168.1.163
    - 192.168.1.141
  ports:
    - protocol: TCP
      port: 8081
      targetPort: 8081
      nodePort: 30000
kubectl apply -f mongo-express-deployment.yaml
- because this is Minikube, use it to assign an external IP address:
minikube service mongo-express-service
Notes from "Install Kubernetes | Setup Kubernetes Step by Step | Kubernetes Training | Intellipaat": https://www.youtube.com/watch?v=l7gC4SgW7DU
-
get 2 machines or VMs (Ubuntu Server is fine)
-
make sure IP addresses are assigned properly (
dhclient -r eth0
and sudo dhclient eth0
) -
enable SSH and log in remotely so we can copy and paste, and view stdout history.
sudo vim /etc/ssh/sshd_config
and enable PasswordAuthentication -
sudo su
-
refer to kubeadm official documentation here: https://kubernetes.io/docs/setup/independent/install-kubeadm
-
apt-get update
-
install Docker on master (the "control-plane node") and nodes:
apt-get install -y docker.io
-
on master and nodes,
apt-get install -y apt-transport-https ca-certificates curl
-
on master and nodes,
curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
-
on master and nodes,
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
-
on master and nodes,
apt-get update
-
on master and nodes,
apt-get install -y kubelet kubeadm kubectl
-
on master and nodes,
apt-mark hold kubelet kubeadm kubectl
-
on master and nodes, disable swap:
swapoff -a
and vim /etc/fstab
to comment out the /swap.img line -
create the cluster as per https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
-
on master,
kubeadm config images pull
-
on master,
kubeadm init --apiserver-advertise-address=<private-ip-address> --pod-network-cidr=192.168.10.0/24 --ignore-preflight-errors=NumCPU,Mem
(or a suitable, separate network subnet) -
to reset kubeadm init, use kubeadm reset
-
on the master,
exit
back to standard user -
follow the instructions at the bottom of the kubeadm output (mkdir ... /.kube/config)
-
if you are root, you may get "connection to the server localhost:8080 was refused" so in that case
exit
-
Calico documentation here: https://docs.projectcalico.org/getting-started/kubernetes/quickstart
-
download the Calico yaml file and vim to edit the network: https://docs.projectcalico.org/manifests/tigera-operator.yaml
-
install Tigera Calico operator on master node:
kubectl apply -f tigera-operator.yaml
-
watch pods being created:
watch kubectl get pods -n calico-system
-
once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS Pod is Running in the output of
kubectl get pods --all-namespaces
-
if you lose the kubeadm join command, use this:
kubeadm token create --print-join-command
-
on the nodes,
sudo kubeadm join .... xxxx
-
kubeadm join may time out, check your IP Tables, UFW, or Amazon firewall
-
kubectl get nodes
will fail unless you follow the instructions above. -
kubectl get nodes
will give status NotReady because network add-on is not installed
A single proxy server example: https://youtu.be/chwofyGr80c?t=869 Documentation: http://cbonte.github.io/haproxy-dconv/2.4/configuration.html
apt-get install -y haproxy
vim /etc/haproxy/haproxy.cfg
-
frontend http_front
    bind *:80
    stats uri /haproxy?stats
    default_backend http_back

backend http_back
    balance roundrobin
    server knode 192.168.1.163:30000
    server knode2 192.168.1.141:30000
systemctl enable haproxy
systemctl start haproxy
From: https://kubernetes.io/docs/tasks/configure-pod-container/translate-compose-kubernetes/ Kompose doesn't support version 3.5 or newer (as of 6/2021). It also doesn't like local volume mounts, but does generate a persistent volume claim for Kubernetes that you need to finish.
curl -L https://github.com/kubernetes/kompose/releases/download/v1.22.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv kompose /usr/local/bin/kompose
kompose convert -f docker-compose-file.yaml
From: https://www.youtube.com/watch?v=to14wmNmRCI
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-pv1
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: SERVER_IP_ADDRESS
    path: "SERVER_NFS_PATH"
... and to claim this:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    io.kompose.service: docker-registry-claim0
  name: docker-registry-claim0
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      # NOTE(review): this claim asks for 3Gi but the PV above only offers 1Gi,
      # so it will never bind — make the sizes consistent.
      storage: 3Gi
kubectl get events --all-namespaces --sort-by='.metadata.creationTimestamp'
kubectl rollout restart deployment ...