- etcd
- NobleProg - Kubernetes
- Kubernetes - installing Addons
- Kubernetes - network plugins comparison
- Creating a single control-plane cluster with kubeadm
- Kubernetes - ingress controller
- YAMLs 1 from NobleProg
- YAMLs 2 from NobleProg
- Prometheus
- Grafana
# --- Install Docker CE on Ubuntu, pinned to 18.06.3 ---
sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
# Add Docker's official APT key (NOTE(review): apt-key is deprecated on recent Ubuntu releases — confirm target OS)
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# List the available docker-ce versions, then install the pinned one
sudo apt-get update && apt-cache madison docker-ce
sudo apt-get install -y docker-ce=18.06.3~ce~3-0~ubuntu
# Let the 'ubuntu' user run docker without sudo (takes effect on next login)
sudo usermod -aG docker ubuntu
# --- Install kubeadm / kubectl / kubelet, all pinned to 1.13.6 ---
sudo apt-get update && sudo apt-get install -y apt-transport-https bash-completion
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
# NOTE(review): apt.kubernetes.io has since been decommissioned; new installs should use pkgs.k8s.io
sudo add-apt-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
sudo apt-get update && sudo apt-get install -y kubeadm=1.13.6-00 kubectl=1.13.6-00 kubelet=1.13.6-00
# Enable shell completion now and persist it for future sessions
source <(kubectl completion bash) && echo "source <(kubectl completion bash)" >> ~/.bashrc
source <(kubeadm completion bash) && echo "source <(kubeadm completion bash)" >> ~/.bashrc
# Initialize the control-plane node
sudo kubeadm init
# Copy the admin kubeconfig so kubectl works for the regular user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Basic cluster inspection commands
kubectl get node
kubectl get node -o wide
kubectl api-resources
kubectl get namespaces
kubectl get pod --all-namespaces
kubectl get pod --namespace=kube-system
kubectl get node,ns,pod --namespace=kube-system
kubectl get node,ns,pod --namespace=kube-system -o wide
# Refresh the combined listing every second
watch -n1 kubectl get node,ns,pod --namespace=kube-system -o wide
kubectl describe pod --namespace=kube-system kube-apiserver-server-s0
# Install a pod network add-on ([podnetwork].yaml is a placeholder for the chosen plugin's manifest)
kubectl apply -f [podnetwork].yaml
# podnetwork with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
# Join a worker to the cluster (token and CA hash are example values printed by kubeadm init)
sudo kubeadm join 11.0.2.10:6443 --token s615s7.njot7h0u45qzilpy --discovery-token-ca-cert-hash sha256:8752e8de6915d6e54f3dcce94bcf8dc743c5623561abe631508ad8bdc466a768
kubectl get nodes
# Manage bootstrap tokens; --print-join-command emits a ready-to-paste join line
kubeadm token list
kubeadm token create --print-join-command
# Deploy the Kubernetes dashboard, then extract a login token from its secret
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta1/aio/deploy/recommended.yaml
kubectl -n kube-system describe secrets `kubectl -n kube-system get secrets | awk '/kubernetes-dashboard/ {print $1}'`| awk '/token:/ {print $2}'
# Proxy the API server (and dashboard UI) to localhost
kubectl proxy
# Alternative token extraction: read the secret's data.token field and base64-decode it
kubectl get secrets --namespace=kube-system kb-service-account-token-qf625 -o go-template='{{(.data.token)}}' | base64 --decode
more ~/.kube/config
# Multi-stage build: compile the Go app in a full golang image, ship only the binary.
FROM golang:1.11-alpine as builder
WORKDIR /tmp
# upx/goupx compress the binary (NOTE(review): goupx is deprecated; upx >= 3.94 handles Go binaries directly)
RUN apk add git upx --no-cache && \
go get github.com/pwaller/goupx
COPY training-app.go .
# CGO_ENABLED=0 plus -ldflags="-s -w" yields a small static binary suitable for a scratch image
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o training-app -ldflags="-s -w" . && \
goupx training-app
# FROM alpine:3.9
# Final stage: empty base image containing nothing but the compiled binary
FROM scratch
COPY --from=builder /tmp/training-app .
CMD ["./training-app"]
# more info
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://blog.filippo.io/shrink-your-go-binaries-with-this-one-weird-trick/
# training-app-base.yaml — 3-replica Deployment plus a ClusterIP Service in front of it.
# Fixed: restored the indentation lost in extraction and the missing `---` separator
# between the Deployment and Service documents (required for a multi-document file).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: training
  labels:
    app: training
spec:
  selector:
    matchLabels:
      app: training
  replicas: 3
  template:
    metadata:
      labels:
        app: training
    spec:
      containers:
        - name: training
          image: training:app
---
apiVersion: v1
kind: Service
metadata:
  name: training
  labels:
    app: training
spec:
  type: ClusterIP
  selector:
    app: training
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
- deployment
- daemon set
- stateful set
K8s: Deployments vs StatefulSets vs DaemonSets
# Apply / remove the base training app manifests
kubectl apply -f training-app-base.yaml
kubectl delete -f training-app-base.yaml
kubectl get service,deployment,pod -o wide
# Delete pods by name, by label selector, or all at once (the Deployment recreates its pods)
kubectl delete pod training-7d99cfcbdf-b4zlx
kubectl delete pod --namespace=kube-system weave-net-7tbkw
kubectl delete pod -l app=training
kubectl delete pod --all
# Change the deployment's replica count
kubectl scale deployment --replicas=5 training
# Open a shell in a running pod (NOTE(review): newer kubectl requires `exec -it <pod> -- sh`)
kubectl exec -it training-7d99cfcbdf-fgnqc sh
# Inspect container logs; --previous shows the prior container instance, --follow streams
kubectl logs --namespace=kube-system etcd-server-s0 --tail=10
kubectl logs --namespace=kube-system etcd-server-s0 --tail=10 --previous
kubectl logs training-app-client --follow
# Show the 'app' label as a column (-L) vs. filter by it (-l)
kubectl get pod -L app
kubectl get node,ns,pod,deployments,service,ep --all-namespaces -o wide -L app
kubectl get pod -l app
# Edit live objects in-place with the default editor
kubectl edit service training
kubectl edit pod training-7d99cfcbdf-6r9z9
- Cluster IP - exposes the service on an internal cluster IP, reachable only from inside the cluster, load-balancing across pods on many workers (
type: ClusterIP
) - Node Port - exposes the service on a static port opened on every node, reachable from outside the cluster (
type: NodePort
) - Load Balancer - provisions a load balancer in front of the workers (
type: LoadBalancer
) - Ingress Controller - a dedicated pod that receives external traffic (optionally via an external load balancer), routes it to the ingress controller, then to the service, then to the pod (
kind: Ingress
)
Kubernetes NodePort vs LoadBalancer vs Ingress? When should I use what?
# Delete every resource in the current namespace / everything defined by the manifests in the current directory
kubectl delete all --all
kubectl delete -f .
# Traefik v1 RBAC: ClusterRole with read access to the resources the controller watches,
# bound to a ServiceAccount in kube-system. Fixed: restored stripped indentation.
# NOTE(review): rbac.authorization.k8s.io/v1beta1 matches the pinned 1.13 cluster but is
# removed in modern Kubernetes — migrate to rbac.authorization.k8s.io/v1 when upgrading.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
# Traefik v1 as a single-replica Deployment, exposed via a NodePort Service.
# Fixed: restored the indentation lost in extraction.
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
        - image: traefik
          name: traefik-ingress-lb
          ports:
            - name: http
              containerPort: 80
            - name: admin
              containerPort: 8080
          args:
            - --api
            - --kubernetes
            - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: web
    - protocol: TCP
      port: 8080
      name: admin
  type: NodePort
# Alternative deployment mode: Traefik as a DaemonSet using the host network,
# binding ports 80/8080 directly on every node. Fixed: restored stripped indentation.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      hostNetwork: true
      containers:
        - image: traefik
          name: traefik-ingress-lb
          ports:
            - name: http
              containerPort: 80
              hostPort: 80
            - name: admin
              containerPort: 8080
              hostPort: 8080
          args:
            - --api
            - --kubernetes
            - --logLevel=INFO
# Services for the DaemonSet variant plus the Traefik web UI, exposed through an
# Ingress rule for host kube-12. Fixed: restored the indentation lost in extraction.
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: web
    - protocol: TCP
      port: 8080
      name: admin
---
apiVersion: v1
kind: Service
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - name: web
      port: 80
      # the dashboard listens on the admin port inside the pod
      targetPort: 8080
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  rules:
    - host: kube-12
      http:
        paths:
          - path: /
            backend:
              serviceName: traefik-web-ui
              servicePort: web
# Two app versions (v1/v2), each with its own Deployment and Service, routed by a
# path-based Ingress for host kube-13. The v2 Service pins clients to one pod via
# sessionAffinity. Fixed: restored stripped indentation and the missing `---`
# separator before the Ingress document.
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: training-v1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: training-v1
    spec:
      containers:
        - name: training-v1
          image: kamilbaran/nobleprog_training:training_app_v1
---
apiVersion: v1
kind: Service
metadata:
  name: training-v1
spec:
  selector:
    app: training-v1
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: training-v2
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: training-v2
    spec:
      containers:
        - name: training-v2
          image: kamilbaran/nobleprog_training:training_app_v2
---
apiVersion: v1
kind: Service
metadata:
  name: training-v2
spec:
  selector:
    app: training-v2
  sessionAffinity: ClientIP
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: training
spec:
  rules:
    - host: kube-13
      http:
        paths:
          - path: /v1
            backend:
              serviceName: training-v1
              servicePort: 80
          - path: /v2
            backend:
              serviceName: training-v2
              servicePort: 80
# Call the v1 path through the ingress, matching the rule via an explicit Host header
curl -H Host:kube-13 server-s2/v1
kubectl scale deployment --replicas=10 training
HPA - Horizontal Pod Autoscaler
# Resource usage per node / pod (NOTE(review): requires a metrics add-on — confirm it is installed)
kubectl top node
kubectl top pod
Horizontal Pod Autoscaler Walkthrough
# Create the demo deployment with a CPU request, then autoscale 1-10 replicas at 50% CPU
kubectl run php-apache --image=k8s.gcr.io/hpa-example --requests=cpu=200m --expose --port=80
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
kubectl get hpa
# Generate load from a throwaway busybox pod (run the while-loop inside its shell)
kubectl run -i --tty load-generator --image=busybox /bin/sh
while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
kubectl describe pod php-apache-84cc7f889b-9q95r
kubectl edit hpa php-apache
# Clean up the demo resources
kubectl delete deploy load-generator
kubectl delete svc,deploy php-apache
kubectl delete horizontalpodautoscalers.autoscaling php-apache
- PVC - PersistentVolumeClaim - a request for storage (may reference a storage class)
- PV - PersistentVolume - the actual storage volume backing a claim
# Base training app: 3-replica Deployment running the v1 image plus a ClusterIP
# Service in front of it. Fixed: restored the indentation lost in extraction.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: training
  labels:
    app: training
spec:
  selector:
    matchLabels:
      app: training
  replicas: 3
  template:
    metadata:
      labels:
        app: training
    spec:
      containers:
        - name: training
          image: kamilbaran/nobleprog_training:training_app_v1
---
apiVersion: v1
kind: Service
metadata:
  name: training
  labels:
    app: training
spec:
  type: ClusterIP
  selector:
    app: training
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
# Network policy exercise: deploy the app, probe it from a test pod, then restrict traffic
kubectl apply -f https://www.kamilbaran.pl/training/kubernetes/training-app-base.yaml
kubectl get svc,deploy,pod,networkpolicy
# Start a throwaway alpine pod for connectivity tests (run wget inside its shell)
kubectl run alpine -it --rm --image=alpine /bin/sh
wget --spider --timeout 1 training
kubectl apply -f https://www.kamilbaran.pl/training/kubernetes/training-network-policy.yaml
# Label the test pod (replace alpine-... with the real pod name) — presumably the policy selects trusted="yes"; verify against the policy manifest
kubectl label pod --overwrite alpine-... trusted="yes"
kubectl delete networkpolicies.extensions training
# Rollouts: re-apply, watch the rollout, revert the last change
kubectl apply -f training-app-base.yaml
kubectl rollout status deployment training
kubectl rollout undo deployment training