$ sudo apt install conntrack socat ipset -y
Add these entries to /etc/hosts on every machine:
192.168.56.4 node001 node001.cluster.local
192.168.56.5 node002 node002.cluster.local
192.168.56.6 node003 node003.cluster.local
192.168.56.8 node004 node004.cluster.local # LoadBalancer Node
$ curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kubectl -o kubectl
$ chmod +x kubectl && sudo mv kubectl /usr/local/bin/
$ curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64 -o cfssl_1.6.4_linux_amd64
$ curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64 -o cfssljson_1.6.4_linux_amd64
$ chmod +x cfssl_1.6.4_linux_amd64 cfssljson_1.6.4_linux_amd64 && \
sudo mv cfssl_1.6.4_linux_amd64 /usr/local/bin/cfssl && \
sudo mv cfssljson_1.6.4_linux_amd64 /usr/local/bin/cfssljson
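Confirm the binaries are on your PATH:
$ cfssl version
$ kubectl version --client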
$ mkdir -p pki/{admin,api,ca,clients,controller,front-proxy,proxy,scheduler,service-account,users}
To generate the TLS certificates, you’ll need some configuration files; you’ll create each one as you generate the corresponding certificate.
$ export TLS_C="IR" TLS_L="Tehran" TLS_OU="kubernetes" TLS_ST="Tehran"
$ cat > pki/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
$ cat > pki/ca/ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "Kubernetes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert -initca pki/ca/ca-csr.json | cfssljson -bare pki/ca/ca
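Before signing anything with the new CA, a quick sanity check on its subject and validity window doesn’t hurt (assuming openssl is installed):
$ openssl x509 -in pki/ca/ca.pem -noout -subject -dates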
$ cat > pki/admin/admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:masters",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/admin/admin-csr.json | cfssljson -bare pki/admin/admin
$ cat > pki/clients/node001-csr.json <<EOF
{
  "CN": "system:node:node001",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:nodes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-hostname=node001,192.168.56.4 \
-profile=kubernetes \
pki/clients/node001-csr.json | cfssljson -bare pki/clients/node001
$ cat > pki/clients/node002-csr.json <<EOF
{
  "CN": "system:node:node002",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:nodes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-hostname=node002,192.168.56.5 \
-profile=kubernetes \
pki/clients/node002-csr.json | cfssljson -bare pki/clients/node002
$ cat > pki/clients/node003-csr.json <<EOF
{
  "CN": "system:node:node003",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:nodes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-hostname=node003,192.168.56.6 \
-profile=kubernetes \
pki/clients/node003-csr.json | cfssljson -bare pki/clients/node003
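The three blocks above differ only in the node name and IP, so for future reference they could be collapsed into an equivalent loop (a sketch that reuses node001’s CSR as a template):
$ declare -A NODE_IP=( [node001]=192.168.56.4 [node002]=192.168.56.5 [node003]=192.168.56.6 )
$ for instance in node001 node002 node003; do
sed "s/node001/${instance}/g" pki/clients/node001-csr.json > pki/clients/${instance}-csr.json
cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-hostname=${instance},${NODE_IP[$instance]} \
-profile=kubernetes \
pki/clients/${instance}-csr.json | cfssljson -bare pki/clients/${instance}
done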
$ cat > pki/controller/kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:kube-controller-manager",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/controller/kube-controller-manager-csr.json | cfssljson -bare pki/controller/kube-controller-manager
$ cat > pki/proxy/kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:node-proxier",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/proxy/kube-proxy-csr.json | cfssljson -bare pki/proxy/kube-proxy
$ cat > pki/scheduler/kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "system:kube-scheduler",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/scheduler/kube-scheduler-csr.json | cfssljson -bare pki/scheduler/kube-scheduler
$ cat > pki/front-proxy/front-proxy-csr.json <<EOF
{
  "CN": "front-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "front-proxy",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/front-proxy/front-proxy-csr.json | cfssljson -bare pki/front-proxy/front-proxy
$ KUBERNETES_LOADBALANCER_ADDRESS=192.168.56.8
$ cat > pki/api/kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "Kubernetes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-hostname=10.32.0.1,192.168.56.4,192.168.56.5,192.168.56.6,${KUBERNETES_LOADBALANCER_ADDRESS},127.0.0.1,kubernetes.default \
-profile=kubernetes \
pki/api/kubernetes-csr.json | cfssljson -bare pki/api/kubernetes
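You can verify that all the SANs made it into the certificate:
$ openssl x509 -in pki/api/kubernetes.pem -noout -ext subjectAltName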
$ cat > pki/service-account/service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "${TLS_C}",
      "L": "${TLS_L}",
      "O": "Kubernetes",
      "OU": "${TLS_OU}",
      "ST": "${TLS_ST}"
    }
  ]
}
EOF
$ cfssl gencert \
-ca=pki/ca/ca.pem \
-ca-key=pki/ca/ca-key.pem \
-config=pki/ca/ca-config.json \
-profile=kubernetes \
pki/service-account/service-account-csr.json | cfssljson -bare pki/service-account/service-account
$ for instance in node001 node002 node003; do
scp pki/ca/ca.pem pki/clients/${instance}-key.pem pki/clients/${instance}.pem ubuntu@${instance}:~/
done
$ for instance in node001 node002 node003; do
scp pki/ca/ca.pem pki/ca/ca-key.pem pki/api/kubernetes-key.pem pki/api/kubernetes.pem pki/service-account/service-account-key.pem pki/service-account/service-account.pem pki/front-proxy/front-proxy-key.pem pki/front-proxy/front-proxy.pem ubuntu@${instance}:~/
done
$ scp pki/ca/ca.pem pki/api/kubernetes-key.pem pki/api/kubernetes.pem ubuntu@node004:~/
$ mkdir -p configs/{admin,clients,controller,proxy,scheduler}
$ for instance in node001 node002 node003; do
kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_LOADBALANCER_ADDRESS}:6443 \
--kubeconfig=configs/clients/${instance}.kubeconfig
kubectl config set-credentials system:node:${instance} \
--client-certificate=pki/clients/${instance}.pem \
--client-key=pki/clients/${instance}-key.pem \
--embed-certs=true \
--kubeconfig=configs/clients/${instance}.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:node:${instance} \
--kubeconfig=configs/clients/${instance}.kubeconfig
kubectl config use-context default --kubeconfig=configs/clients/${instance}.kubeconfig
done
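A spot check that the CA and client certificate were embedded (kubectl redacts the certificate data):
$ kubectl config view --kubeconfig=configs/clients/node001.kubeconfig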
$ kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_LOADBALANCER_ADDRESS}:6443 \
--kubeconfig=configs/proxy/kube-proxy.kubeconfig
$ kubectl config set-credentials system:kube-proxy \
--client-certificate=pki/proxy/kube-proxy.pem \
--client-key=pki/proxy/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=configs/proxy/kube-proxy.kubeconfig
$ kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-proxy \
--kubeconfig=configs/proxy/kube-proxy.kubeconfig
$ kubectl config use-context default --kubeconfig=configs/proxy/kube-proxy.kubeconfig
$ kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=configs/controller/kube-controller-manager.kubeconfig
$ kubectl config set-credentials system:kube-controller-manager \
--client-certificate=pki/controller/kube-controller-manager.pem \
--client-key=pki/controller/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=configs/controller/kube-controller-manager.kubeconfig
$ kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=configs/controller/kube-controller-manager.kubeconfig
$ kubectl config use-context default --kubeconfig=configs/controller/kube-controller-manager.kubeconfig
$ kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=configs/scheduler/kube-scheduler.kubeconfig
$ kubectl config set-credentials system:kube-scheduler \
--client-certificate=pki/scheduler/kube-scheduler.pem \
--client-key=pki/scheduler/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=configs/scheduler/kube-scheduler.kubeconfig
$ kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=configs/scheduler/kube-scheduler.kubeconfig
$ kubectl config use-context default --kubeconfig=configs/scheduler/kube-scheduler.kubeconfig
$ kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=configs/admin/admin.kubeconfig
$ kubectl config set-credentials admin \
--client-certificate=pki/admin/admin.pem \
--client-key=pki/admin/admin-key.pem \
--embed-certs=true \
--kubeconfig=configs/admin/admin.kubeconfig
$ kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=configs/admin/admin.kubeconfig
$ kubectl config use-context default --kubeconfig=configs/admin/admin.kubeconfig
$ for instance in node001 node002 node003; do
scp configs/clients/${instance}.kubeconfig configs/proxy/kube-proxy.kubeconfig ubuntu@${instance}:~/
done
$ for instance in node001 node002 node003; do
scp configs/admin/admin.kubeconfig configs/controller/kube-controller-manager.kubeconfig configs/scheduler/kube-scheduler.kubeconfig ubuntu@${instance}:~/
done
$ mkdir data-encryption
$ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
$ cat > data-encryption/encryption-config.yaml <<EOF
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
$ for instance in node001 node002 node003; do
scp data-encryption/encryption-config.yaml ubuntu@${instance}:~/
done
$ curl -L https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz -o etcd-v3.5.9-linux-amd64.tar.gz
$ tar -xvf etcd-v3.5.9-linux-amd64.tar.gz && sudo mv etcd-v3.5.9-linux-amd64/etcd* /usr/local/bin/
$ sudo mkdir -p /etc/etcd /var/lib/etcd &&
sudo cp ~/ca.pem ~/kubernetes-key.pem ~/kubernetes.pem /etc/etcd/ &&
INTERNAL_IP=$(ip addr show enp0s3 | grep -Po 'inet \K[\d.]+') &&
ETCD_NAME=$(hostname -s)
$ cat <<EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --data-dir=/var/lib/etcd \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --initial-cluster node001=https://192.168.56.4:2380,node002=https://192.168.56.5:2380,node003=https://192.168.56.6:2380 \\
  --initial-cluster-state new \\
  --initial-cluster-token etcd-cluster-0 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --cert-file=/etc/etcd/kubernetes.pem \\
  --key-file=/etc/etcd/kubernetes-key.pem \\
  --client-cert-auth \\
  --trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-cert-file=/etc/etcd/kubernetes.pem \\
  --peer-key-file=/etc/etcd/kubernetes-key.pem \\
  --peer-client-cert-auth \\
  --peer-trusted-ca-file=/etc/etcd/ca.pem
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo systemctl daemon-reload && \
sudo systemctl enable etcd && \
sudo systemctl start etcd
$ sudo etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem
9bc278c1c7c9443e, started, node002, https://192.168.56.5:2380, https://192.168.56.5:2379, false
a4837854fa751840, started, node001, https://192.168.56.4:2380, https://192.168.56.4:2379, false
aad0cb7dd7b1be78, started, node003, https://192.168.56.6:2380, https://192.168.56.6:2379, false
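etcdctl can also confirm that every member answers on its client URL:
$ sudo etcdctl endpoint health \
--endpoints=https://192.168.56.4:2379,https://192.168.56.5:2379,https://192.168.56.6:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem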
$ sudo mkdir -p /etc/kubernetes/config && \
curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kube-apiserver -o kube-apiserver && \
curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kube-controller-manager -o kube-controller-manager && \
curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kube-scheduler -o kube-scheduler && \
curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kubectl -o kubectl && \
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl && \
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
$ sudo mkdir -p /var/lib/kubernetes/ && \
sudo cp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
service-account-key.pem service-account.pem \
front-proxy-key.pem front-proxy.pem encryption-config.yaml /var/lib/kubernetes/
$ cat <<EOF | sudo tee /etc/kubernetes/audit-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
      - group: ""
        # Resource "pods" doesn't match requests to any subresource of pods,
        # which is consistent with the RBAC policy.
        resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
      - group: ""
        resources: ["pods/log", "pods/status"]
  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
      - group: ""
        resources: ["configmaps"]
        resourceNames: ["controller-leader"]
  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core API group
        resources: ["endpoints", "services"]
  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
      - "/api*" # Wildcard matching.
      - "/version"
  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
      - group: "" # core API group
        resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]
  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core API group
        resources: ["secrets", "configmaps"]
  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
      - group: "" # core API group
      - group: "extensions" # Version of group should NOT be included.
  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
EOF
Set KUBERNETES_LOADBALANCER_ADDRESS again here, since this heredoc runs on each controller node where the variable isn’t defined:
$ KUBERNETES_LOADBALANCER_ADDRESS=192.168.56.8
$ cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --advertise-address=${INTERNAL_IP} \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/ca.pem \\
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --etcd-cafile=/var/lib/kubernetes/ca.pem \\
  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
  --etcd-servers=https://192.168.56.4:2379,https://192.168.56.5:2379,https://192.168.56.6:2379 \\
  --event-ttl=1h \\
  --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
  --proxy-client-cert-file=/var/lib/kubernetes/front-proxy.pem \\
  --proxy-client-key-file=/var/lib/kubernetes/front-proxy-key.pem \\
  --requestheader-allowed-names=front-proxy \\
  --requestheader-client-ca-file=/var/lib/kubernetes/ca.pem \\
  --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --runtime-config='api/all=true' \\
  --secure-port=6443 \\
  --service-account-issuer=https://${KUBERNETES_LOADBALANCER_ADDRESS}:6443 \\
  --service-account-key-file=/var/lib/kubernetes/service-account.pem \\
  --service-account-signing-key-file=/var/lib/kubernetes/service-account-key.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo cp ~/kube-controller-manager.kubeconfig /var/lib/kubernetes/
$ cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --allocate-node-cidrs=true \\
  --bind-address=0.0.0.0 \\
  --cluster-cidr=10.200.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
  --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --root-ca-file=/var/lib/kubernetes/ca.pem \\
  --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --use-service-account-credentials=true \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo cp ~/kube-scheduler.kubeconfig /var/lib/kubernetes/
$ cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
  leaderElect: true
EOF
$ cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --config=/etc/kubernetes/config/kube-scheduler.yaml \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo systemctl daemon-reload && \
sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler && \
sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
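Give the services a few seconds to settle, then confirm they’re up and the API answers locally (admin.kubeconfig, copied to the home directory earlier, points at 127.0.0.1):
$ sudo systemctl is-active kube-apiserver kube-controller-manager kube-scheduler
$ kubectl cluster-info --kubeconfig ~/admin.kubeconfig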
Since the /healthz endpoint is only served over TLS on port 6443, which the load balancer’s plain-HTTP health check can’t reach directly, you need nginx installed as a proxy on each controller node:
$ sudo apt install -y nginx
$ cat <<EOF | sudo tee kubernetes.default.svc.cluster.local
server {
  listen 80;
  server_name kubernetes.default.svc.cluster.local;

  location /healthz {
    proxy_pass https://127.0.0.1:6443/healthz;
    proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem;
  }
}
EOF
$ sudo rm /etc/nginx/sites-enabled/default && \
sudo mv kubernetes.default.svc.cluster.local /etc/nginx/sites-available/kubernetes.default.svc.cluster.local && \
sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/ && \
sudo systemctl restart nginx
$ curl -k https://localhost:6443/livez?verbose
$ curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz
$ cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
EOF
$ cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
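To confirm the binding works, impersonate the kubernetes user that the API server presents to kubelets; this should print yes:
$ kubectl auth can-i get nodes --subresource=proxy --as=kubernetes --kubeconfig admin.kubeconfig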
$ sudo apt install haproxy -y
$ sudo openssl req -nodes -new -x509 -keyout /etc/haproxy/api_key.pem -out /etc/haproxy/api_cert.pem -days 365
Generating a RSA private key
+++............................................+++++
writing new private key to '/etc/haproxy/api_key.pem'
-----
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [AU]:IR
State or Province Name (full name) [Some-State]:Tehran
Locality Name (eg, city) []:Tehran
Organization Name (eg, company) [Internet Widgits Pty Ltd]:Kubernetes
Organizational Unit Name (eg, section) []:Kubernetes
Common Name (e.g. server FQDN or YOUR name) []:node004.cluster.local
Email Address []:admin@cluster.local
$ sudo sh -c "cat /etc/haproxy/api_key.pem /etc/haproxy/api_cert.pem > /etc/haproxy/k8s_api.pem"
$ sudo nano /etc/haproxy/haproxy.cfg
### Controllers ###
frontend apiservers
    bind *:80
    bind *:443 ssl crt /etc/haproxy/k8s_api.pem
    http-request redirect scheme https unless { ssl_fc }
    mode http
    option forwardfor
    default_backend k8s_apiservers

frontend kube_api
    bind *:6443
    mode tcp
    option tcplog
    default_backend k8s_apiservers_6443

backend k8s_apiservers
    mode http
    balance roundrobin
    option forwardfor
    option httpchk GET /healthz HTTP/1.1\r\nHost:kubernetes.default.svc.cluster.local
    default-server inter 10s fall 2
    server node001 192.168.56.4:80 check
    server node002 192.168.56.5:80 check
    server node003 192.168.56.6:80 check

backend k8s_apiservers_6443
    mode tcp
    option ssl-hello-chk
    option log-health-checks
    default-server inter 10s fall 2
    server node001 192.168.56.4:6443 check
    server node002 192.168.56.5:6443 check
    server node003 192.168.56.6:6443 check
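Before restarting, validate the file, since haproxy refuses to start on a syntax error:
$ sudo haproxy -c -f /etc/haproxy/haproxy.cfg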
$ sudo systemctl restart haproxy
$ curl -k https://node004.cluster.local/healthz
ok
$ curl -k https://node004.cluster.local:6443/version
{
"major": "1",
"minor": "28",
"gitVersion": "v1.28.2",
"gitCommit": "89a4ea3e1e4ddd7f7572286090359983e0387b2f",
"gitTreeState": "clean",
"buildDate": "2023-09-13T09:29:07Z",
"goVersion": "go1.20.8",
"compiler": "gc",
"platform": "linux/amd64"
}
$ curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-amd64.tar.gz -o crictl-v1.28.0-linux-amd64.tar.gz
$ curl -L https://github.com/opencontainers/runc/releases/download/v1.1.9/runc.amd64 -o runc.amd64
$ curl -L https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz -o cni-plugins-linux-amd64-v1.3.0.tgz
$ curl -L https://github.com/containerd/containerd/releases/download/v1.6.24/containerd-1.6.24-linux-amd64.tar.gz -o containerd-1.6.24-linux-amd64.tar.gz
$ curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kubelet -o kubelet
$ curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kube-proxy -o kube-proxy
$ curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/linux/amd64/kubectl -o kubectl
$ sudo mkdir -p \
/etc/cni/net.d \
/opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \
/var/lib/kubernetes \
/var/run/kubernetes
$ mkdir containerd && \
tar -xvf crictl-v1.28.0-linux-amd64.tar.gz && \
tar -xvf containerd-1.6.24-linux-amd64.tar.gz -C containerd && \
sudo tar -xvf cni-plugins-linux-amd64-v1.3.0.tgz -C /opt/cni/bin/ && \
sudo mv runc.amd64 runc && \
chmod +x crictl kube-proxy kubelet runc && \
sudo mv crictl kube-proxy kubelet runc /usr/local/bin/ && \
sudo mv containerd/bin/* /bin/
$ chmod +x kubectl && sudo mv kubectl /usr/local/bin/
Each worker gets its own bridge subnet, matching the podCIDR it will be assigned. On node001 (pod subnet 10.200.1.0/24):
$ cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
  "cniVersion": "0.4.0",
  "name": "bridge",
  "type": "bridge",
  "bridge": "cnio0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [{"subnet": "10.200.1.0/24"}]
    ],
    "routes": [{"dst": "0.0.0.0/0"}]
  }
}
EOF
On node002 (pod subnet 10.200.2.0/24):
$ cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
  "cniVersion": "0.4.0",
  "name": "bridge",
  "type": "bridge",
  "bridge": "cnio0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [{"subnet": "10.200.2.0/24"}]
    ],
    "routes": [{"dst": "0.0.0.0/0"}]
  }
}
EOF
On node003 (pod subnet 10.200.3.0/24):
$ cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
  "cniVersion": "0.4.0",
  "name": "bridge",
  "type": "bridge",
  "bridge": "cnio0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [{"subnet": "10.200.3.0/24"}]
    ],
    "routes": [{"dst": "0.0.0.0/0"}]
  }
}
EOF
$ cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
{
  "cniVersion": "0.4.0",
  "name": "lo",
  "type": "loopback"
}
EOF
$ sudo mkdir -p /etc/containerd/
$ cat << EOF | sudo tee /etc/containerd/config.toml
[plugins]
  [plugins.cri.containerd]
    snapshotter = "overlayfs"
    [plugins.cri.containerd.default_runtime]
      runtime_type = "io.containerd.runtime.v1.linux"
      runtime_engine = "/usr/local/bin/runc"
      runtime_root = ""
EOF
$ cat <<EOF | sudo tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
$ sudo cp ~/${HOSTNAME}-key.pem ~/${HOSTNAME}.pem /var/lib/kubelet/ && \
sudo cp ~/${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig && \
sudo cp ~/ca.pem /var/lib/kubernetes/
The kubelet config differs per node only in podCIDR. On node001:
$ cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.32.0.10"
podCIDR: "10.200.1.0/24"
resolvConf: "/run/systemd/resolve/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
EOF
On node002:
$ cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.32.0.10"
podCIDR: "10.200.2.0/24"
resolvConf: "/run/systemd/resolve/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
EOF
On node003:
$ cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.32.0.10"
podCIDR: "10.200.3.0/24"
resolvConf: "/run/systemd/resolve/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
EOF
$ cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
  --config=/var/lib/kubelet/kubelet-config.yaml \\
  --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --register-node=true \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo cp ~/kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
$ cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
mode: "iptables"
clusterCIDR: "10.200.0.0/16"
EOF
$ cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$ sudo swapoff -a
$ free -h
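swapoff -a only lasts until the next reboot; to keep swap off permanently, comment out the swap entry in /etc/fstab (a sketch; adjust if your fstab differs):
$ sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab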
$ sudo systemctl daemon-reload && \
sudo systemctl enable containerd kubelet kube-proxy && \
sudo systemctl start containerd kubelet kube-proxy
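If everything came up, containerd should already answer CRI calls:
$ sudo crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock ps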
$ kubectl get nodes --kubeconfig admin.kubeconfig
NAME STATUS ROLES AGE VERSION
node001 Ready <none> 113s v1.28.2
node002 Ready <none> 38s v1.28.2
node003 Ready <none> 35s v1.28.2
$ KUBERNETES_LOADBALANCER_ADDRESS=192.168.56.8
$ kubectl config set-cluster kubernetes \
--certificate-authority=pki/ca/ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_LOADBALANCER_ADDRESS}:6443 \
--kubeconfig=configs/admin/admin-remote.kubeconfig
$ kubectl config set-credentials admin \
--client-certificate=pki/admin/admin.pem \
--client-key=pki/admin/admin-key.pem \
--embed-certs=true \
--kubeconfig=configs/admin/admin-remote.kubeconfig
$ kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=configs/admin/admin-remote.kubeconfig
$ kubectl config use-context kubernetes --kubeconfig=configs/admin/admin-remote.kubeconfig
$ mkdir -p ~/.kube && cp configs/admin/admin-remote.kubeconfig ~/.kube/config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
node001 Ready <none> 8m54s v1.28.2
node002 Ready <none> 7m39s v1.28.2
node003 Ready <none> 7m36s v1.28.2
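Since encryption at rest is configured, it’s worth confirming a secret never reaches etcd in plaintext; create one, then dump its raw etcd value on one of the controller nodes. The payload should start with k8s:enc:aescbc:v1:key1 rather than readable JSON:
$ kubectl create secret generic test-secret --from-literal=key=value
$ sudo etcdctl get /registry/secrets/default/test-secret \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem | hexdump -C | head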
Download the etcd-datastore variant of the Calico manifest, since the sed edits below target its etcd_endpoints and etcd-* placeholders:
$ curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico-etcd.yaml -o calico.yaml
$ sed -i 's/etcd_endpoints:\ "http:\/\/<ETCD_IP>:<ETCD_PORT>"/etcd_endpoints:\ "https:\/\/192.168.56.4:2379,https:\/\/192.168.56.5:2379,https:\/\/192.168.56.6:2379"/g' calico.yaml
$ sed -i "s/# etcd-cert: null/etcd-cert: $(cat pki\/api\/kubernetes.pem | base64 -w 0)/g" calico.yaml
$ sed -i "s/# etcd-key: null/etcd-key: $(cat pki\/api\/kubernetes-key.pem | base64 -w 0)/g" calico.yaml
$ sed -i "s/# etcd-ca: null/etcd-ca: $(cat pki\/ca\/ca.pem | base64 -w 0)/g" calico.yaml
$ sed -i "s/etcd_ca: \"\"/etcd_ca: \"\/calico-secrets\/etcd-ca\"/g" calico.yaml
$ sed -i "s/etcd_cert: \"\"/etcd_cert: \"\/calico-secrets\/etcd-cert\"/g" calico.yaml
$ sed -i "s/etcd_key: \"\"/etcd_key: \"\/calico-secrets\/etcd-key\"/g" calico.yaml
$ sed -i 's/# - name: CALICO_IPV4POOL_CIDR/- name: CALICO_IPV4POOL_CIDR/g' calico.yaml
$ sed -i 's/#   value: "192.168.0.0\/16"/  value: "10.200.0.0\/16"/g' calico.yaml
$ kubectl apply -f calico.yaml
$ kubectl get po -n kube-system -w
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-7ddc4f45bc-tc9wm 1/1 Running 0 75s
calico-node-55884 1/1 Running 0 17m
calico-node-dxbg8 1/1 Running 0 17m
calico-node-njq7w 1/1 Running 0 17m
$ sudo sysctl net.ipv4.conf.all.forwarding=1
$ helm repo add coredns https://coredns.github.io/helm
The kubelets were configured with clusterDNS 10.32.0.10, so pin the CoreDNS Service to that address; without the flag the API server assigns an arbitrary ClusterIP (like the 10.32.0.117 seen below):
$ helm --namespace=kube-system install coredns coredns/coredns --set service.clusterIP=10.32.0.10
$ kubectl -n kube-system get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-coredns-967ddbb6d-bg9wh 1/1 Running 0 4m51s 10.200.2.5 node002 <none> <none>
$ dig @10.200.2.5 kubernetes.default.svc.cluster.local +noall +answer
kubernetes.default.svc.cluster.local. 21 IN A 10.32.0.1
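Querying the pod IP only proves the server itself works; a throwaway pod exercises resolution through the clusterDNS address the kubelets hand out (assuming the busybox image can be pulled):
$ kubectl run dns-test --image=busybox:1.36 --rm -it --restart=Never -- nslookup kubernetes.default.svc.cluster.local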
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
node001 Ready <none> 36h v1.28.2 192.168.56.4 <none> Ubuntu 20.04.6 LTS 5.4.0-163-generic containerd://1.6.24
node002 Ready <none> 36h v1.28.2 192.168.56.5 <none> Ubuntu 20.04.6 LTS 5.4.0-163-generic containerd://1.6.24
node003 Ready <none> 36h v1.28.2 192.168.56.6 <none> Ubuntu 20.04.6 LTS 5.4.0-163-generic containerd://1.6.24
$ kubectl get all --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system pod/calico-kube-controllers-7ddc4f45bc-tc9wm 1/1 Running 0 36h 10.200.1.3 node001 <none> <none>
kube-system pod/calico-node-55884 1/1 Running 0 36h 192.168.56.5 node002 <none> <none>
kube-system pod/calico-node-dxbg8 1/1 Running 0 36h 192.168.56.4 node001 <none> <none>
kube-system pod/calico-node-njq7w 1/1 Running 0 36h 192.168.56.6 node003 <none> <none>
kube-system pod/coredns-coredns-967ddbb6d-bg9wh 1/1 Running 0 14m 10.200.2.5 node002 <none> <none>
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
default service/kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 40h <none>
kube-system service/coredns-coredns ClusterIP 10.32.0.117 <none> 53/UDP,53/TCP 40m app.kubernetes.io/instance=coredns,app.kubernetes.io/name=coredns,k8s-app=coredns
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
kube-system daemonset.apps/calico-node 3 3 3 3 3 kubernetes.io/os=linux 36h calico-node docker.io/calico/node:v3.26.1 k8s-app=calico-node
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
kube-system deployment.apps/calico-kube-controllers 1/1 1 1 36h calico-kube-controllers docker.io/calico/kube-controllers:v3.26.1 k8s-app=calico-kube-controllers
kube-system deployment.apps/coredns-coredns 1/1 1 1 40m coredns coredns/coredns:1.11.1 app.kubernetes.io/instance=coredns,app.kubernetes.io/name=coredns,k8s-app=coredns
NAMESPACE NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
kube-system replicaset.apps/calico-kube-controllers-7ddc4f45bc 1 1 1 36h calico-kube-controllers docker.io/calico/kube-controllers:v3.26.1 k8s-app=calico-kube-controllers,pod-template-hash=7ddc4f45bc
kube-system replicaset.apps/coredns-coredns-967ddbb6d 1 1 1 40m coredns coredns/coredns:1.11.1 app.kubernetes.io/instance=coredns,app.kubernetes.io/name=coredns,k8s-app=coredns,pod-template-hash=967ddbb6d
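As a final smoke test, a plain deployment plus a NodePort service exercises scheduling, pod networking, and kube-proxy end to end:
$ kubectl create deployment nginx --image=nginx
$ kubectl expose deployment nginx --port=80 --type=NodePort
$ NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
$ curl -I http://192.168.56.4:${NODE_PORT}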