Control plane (Master)
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- etcd
Nodes
- kubelet
- kube-proxy
Create self-signed root CA certificate (on the master node)
openssl genrsa -out ca.key 2048
openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
openssl x509 -req -in ca.csr -signkey ca.key -out ca.crt
openssl rsa -in ca.key -pubout > ca.pem
First create the openssl.cnf that describes the certificate's subject alternative names (DNS names, IPs, ...), as sketched below.
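A minimal openssl.cnf sketch; the service IP (10.96.0.1) and master node IP are assumptions for a typical cluster:
cat <<EOF > openssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
IP.1 = 10.96.0.1       # assumption: first IP of the service CIDR
IP.2 = 192.168.1.100   # assumption: the master node IP
EOF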
openssl genrsa -out apiserver.key 2048
openssl req -new -key apiserver.key -subj "/CN=kube-apiserver" -out apiserver.csr -config openssl.cnf
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -extensions v3_req -extfile openssl.cnf -out apiserver.crt
Create signed certificate for admin user with admin privileges
openssl genrsa -out admin.key 2048
# Signing certificate using root cert
openssl req -new -key admin.key -subj "/CN=kube-admin/O=system:masters" -out admin.csr
# Signed certificate
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -out admin.crt -CAcreateserial
TIP : curl API using certificates
curl https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version --key admin.key --cert admin.crt --cacert ca.crt
CertificateSigningRequest
DOC : Reference / API Access Control / Certificate Signing Requests
# Help
kubectl explain csr
kubectl explain csr.spec
# Create
cat <<EOF > john-csr.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john
spec:
  groups:
  - system:authenticated
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFV........
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF
k apply -f john-csr.yaml
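The request field above is the base64-encoded CSR. A sketch to produce it (john.key / john.csr are assumed file names):
openssl genrsa -out john.key 2048
openssl req -new -key john.key -subj "/CN=john" -out john.csr
cat john.csr | base64 -w 0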
Manage CertificateSigningRequest (csr)
k get csr
kubectl get csr/john -o yaml
k certificate approve john
k certificate deny johny
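Once approved, the signed certificate can be extracted from the CSR object (sketch):
kubectl get csr john -o jsonpath='{.status.certificate}' | base64 -d > john.crt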
Avoid specifying the server URL + credential files on every kubectl command line:
Ex: without kubeconfig
kubectl get pods \
--server my-api-server:6443 \
--client-key admin.key \
--client-certificate admin.crt \
--certificate-authority ca.crt
Kubeconfig file structure = clusters + users + contexts
kind: Config
apiVersion: v1
current-context: admin@cluster1
clusters:
- name: cluster1
  cluster:
    server: https://my-api-server:6443
    certificate-authority-data: BASE64_ENCODED_CA_CRT...
contexts:
- name: admin@cluster1
  context:
    user: admin
    cluster: cluster1
    namespace: staging
users:
- name: admin
  user:
    client-certificate-data: BASE64_ENCODED_CRT...
    client-key-data: BASE64_ENCODED_KEY...
- name: minikube
  user:
    client-certificate: /home/user/.minikube/client.crt
    client-key: /home/user/.minikube/client.key
KubeConfig info | Command |
---|---|
Get kubectl config help | kubectl config -h |
Default location | $HOME/.kube/config |
Define custom config for kubectl | export KUBECONFIG=/path/to/config/file |
Show current config command | kubectl config view |
Specify in kubectl | kubectl get pod --kubeconfig=path/to/config/file |
Create context | kubectl config set-context admin@cluster1-prod --user admin --namespace prod --cluster cluster1 |
Change current context | kubectl config use-context dev@cluster2 |
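Also handy (a sketch): switch the current context's namespace without editing the file, then list contexts:
kubectl config set-context --current --namespace=dev
kubectl config get-contexts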
Resource actions:
# Impersonate:
kubectl auth can-i create pod --as mike --namespace production
# List namespaced / cluster-scoped resources
kubectl api-resources --namespaced=false
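A sketch to dump everything an impersonated user may do (mike is an example user):
kubectl auth can-i --list --as mike --namespace production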
kubectl explain NetworkPolicy
HIW : Create policy rules (Ingress: from, Egress: to) using label matching
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-policy
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          name: api-pod
    ports:
    - protocol: TCP
      port: 3306
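A quick connectivity check sketch, assuming a db service reachable at db-svc:3306 and a CNI plugin that enforces NetworkPolicy:
# Allowed: pod carrying the whitelisted label
kubectl run api-test --labels="name=api-pod" --image=busybox --rm -it --restart=Never -- nc -zv -w 2 db-svc 3306
# Denied: pod without the label (should time out)
kubectl run other-test --image=busybox --rm -it --restart=Never -- nc -zv -w 2 db-svc 3306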
# Access the API through kubectl proxy (authenticates with the kubeconfig credentials)
kubectl proxy &
curl http://localhost:8001/api
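Browsing further through the proxy, e.g. listing pods in the default namespace (sketch):
curl http://localhost:8001/api/v1/namespaces/default/pods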
# Inspect a certificate
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text -noout
# Base64-encode a CSR on one line
cat my-cert.csr | base64 -w 0
* Create a kubeconfig from admin certs
```sh
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.crt \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.crt \
--client-key=admin.key \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
}
```
Kubelet TLS bootstrapping. DOC : https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-cluster bootstrap --server="https://${KUBERNETES_LB_IP}:6443" --certificate-authority=/var/lib/kubernetes/ca.crt
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-credentials kubelet-bootstrap --token=07401b.f395accd246ae52d
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-context bootstrap --user=kubelet-bootstrap --cluster=bootstrap
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig use-context bootstrap
TUTORIALS :
//...
# Service status / restart
sudo service myservice status
sudo service myservice restart
# Check service configuration
cat /etc/systemd/system/myservice.service
# Reload systemctl
sudo systemctl daemon-reload
# Show logs
sudo journalctl -u myservice
sudo ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.crt \
--cert=/etc/etcd/etcd-server.crt \
--key=/etc/etcd/etcd-server.key
kubectl get componentstatuses --kubeconfig admin.kubeconfig
Cluster version
kubectl version --short
kubectl get nodes -o wide
Free a node before maintenance (evicts its pods)
k drain node01 --ignore-daemonsets
# After maintenance make node01 available to be scheduled
k uncordon node01
Check if master can be scheduled for workload pods
k describe node master | grep -i taint
Make node unschedulable (critical resources)
k cordon master
Get next stable release version available
kubeadm upgrade plan
kubectl drain master
kubeadm version
# Upgrade kubeadm to targeted version
sudo apt install kubeadm=1.18.0-00
kubeadm upgrade apply v1.18.0
kubectl version --short
# Upgrade kubelet to targeted version
sudo apt install kubelet=1.18.0-00
kubectl uncordon master
kubectl get nodes
kubectl drain node01
ssh node01
sudo apt install kubeadm=1.18.0-00
kubeadm upgrade node
sudo apt install kubelet=1.18.0-00
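# Restart kubelet so the upgrade takes effect (a standard step, sketch)
sudo systemctl daemon-reload
sudo systemctl restart kubelet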
kubectl uncordon node01
kubectl get nodes
Get ETCD version
k describe pod -n kube-system etcd-master-pod | grep Image
Test ETCDCTL command parameters: etcdctl member list
ETCDCTL_API=3 etcdctl member list \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379
Take ETCD snapshot
ETCDCTL_API=3 etcdctl snapshot save -h
SNAPSHOT_LOCATION=/tmp/snapshot.db
ETCDCTL_API=3 etcdctl snapshot save \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379 $SNAPSHOT_LOCATION
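Verify the snapshot before relying on it (sketch):
ETCDCTL_API=3 etcdctl snapshot status $SNAPSHOT_LOCATION -w table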
Restore backup
ETCDCTL_API=3 etcdctl snapshot restore -h
SNAPSHOT_LOCATION=/tmp/snapshot.db
ETCDCTL_API=3 etcdctl snapshot restore \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379 \
--name=master \
--initial-cluster="master=https://127.0.0.1:2380" \
--initial-advertise-peer-urls="https://127.0.0.1:2380" \
--initial-cluster-token="etcd-cluster-001" \
--data-dir=/var/lib/etcd-from-backup $SNAPSHOT_LOCATION
# IMPORTANT : Edit etcd config and set parameters (initial-cluster-token, data-dir) :
vi /etc/kubernetes/manifests/etcd.yaml
# --initial-cluster-token="etcd-cluster-001"
# --data-dir=/var/lib/etcd-from-backup (+ volumeMounts.mountPath + volumes.path )
The etcd static pod is automatically recreated. Listing members should now display the new member:
ETCDCTL_API=3 etcdctl member list --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key --endpoints=127.0.0.1:2379
k get pod,svc,deployment
# list interfaces
ip link
# ip associated to inet's
ip addr
ip addr add 192.168.1.10/24 dev eth0
# List or add routes
ip r
ip route
ip route add 192.168.2.10/24 via 192.168.1.1
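# Default route sketch (the gateway IP is an example)
ip route add default via 192.168.1.1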
ifconfig ens3
ip link
Add local DNS entries
echo "192.168.1.3 myhost" >> /etc/hosts
Show DNS server
cat /etc/resolv.conf
ip netns add blue
# list interfaces inside blue net-ns
ip netns exec blue ip link
Exec in a netns: `ip netns exec <ns> ip ...` == `ip -n <ns> ...`
ip netns exec blue ip link
# or
ip -n blue link
Add virtual links between net namespaces interaces (veth)
# Create veth and link two ns
ip link add veth-red type veth peer name veth-blue
ip link set veth-red netns red
ip link set veth-blue netns blue
# Give ip addr to veth's
ip -n red addr add 192.168.15.1 dev veth-red
ip -n blue addr add 192.168.15.2 dev veth-blue
# Activate
ip -n red link set veth-red up
ip -n blue link set veth-blue up
Ping an IP from inside a netns:
ip netns exec red ping 192.168.15.2
Check if the machine is allowed to forward IP traffic:
cat /proc/sys/net/ipv4/ip_forward
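To enable forwarding (sketch; not persistent across reboots unless set in /etc/sysctl.conf):
echo 1 > /proc/sys/net/ipv4/ip_forward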
CNI : a specification to simplify container network management
Ex K8S CNI : https://kubernetes.io/docs/concepts/cluster-administration/addons/
NB : Docker has its own network management implementation (CNM : Container Network Model), different from CNI. So when Kubernetes uses Docker, it disables Docker's network management and adds the CNI layer itself.
docker run --network=none nginx
# Manually attach a container netns using the bridge CNI plugin
bridge add xaxaxa /var/run/netns/xaxaxa
k describe service myservice
k get pod --show-labels
k describe pod mypod
k logs mypod
k logs mypod --previous
# Kubernetes hard way
sudo journalctl -u etcd.service -l
sudo journalctl -u kube-apiserver
# Containerized installation (kubeadm)
kubectl logs etcd-master -n kube-system
kubectl logs kube-apiserver-master -n kube-system
docker logs 8c1a23efd
top
df -h
sudo journalctl -u kubelet
openssl x509 -in /var/lib/kubelet/worker-1.crt -text
Certified Kubernetes Administrator cheat sheet