Skip to content

Instantly share code, notes, and snippets.

@MaxRink
Created October 27, 2021 10:29
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save MaxRink/8d95f60fc518c6cb053aec37f4fe74ed to your computer and use it in GitHub Desktop.
## template: jinja
#cloud-config
# Cloud-init user-data rendered by a Cluster API kubeadm bootstrap provider for
# a control-plane node join. Indentation below restores the canonical
# cloud-config structure (the pasted source had all leading whitespace stripped,
# which makes the YAML invalid).
write_files:
# Cluster CA certificate/key material consumed by kubeadm during the join.
# NOTE(review): every PEM payload below is empty (only BEGIN/END markers) —
# presumably redacted before sharing; the real bootstrap data must carry the
# actual certificate/key bodies.
- path: /etc/kubernetes/pki/ca.crt
  owner: root:root
  permissions: '0640'
  content: |
    -----BEGIN CERTIFICATE-----
    -----END CERTIFICATE-----
- path: /etc/kubernetes/pki/ca.key
  owner: root:root
  permissions: '0600'
  content: |
    -----BEGIN RSA PRIVATE KEY-----
    -----END RSA PRIVATE KEY-----
- path: /etc/kubernetes/pki/etcd/ca.crt
  owner: root:root
  permissions: '0640'
  content: |
    -----BEGIN CERTIFICATE-----
    -----END CERTIFICATE-----
- path: /etc/kubernetes/pki/etcd/ca.key
  owner: root:root
  permissions: '0600'
  content: |
    -----BEGIN RSA PRIVATE KEY-----
    -----END RSA PRIVATE KEY-----
- path: /etc/kubernetes/pki/front-proxy-ca.crt
  owner: root:root
  permissions: '0640'
  content: |
    -----BEGIN CERTIFICATE-----
    -----END CERTIFICATE-----
- path: /etc/kubernetes/pki/front-proxy-ca.key
  owner: root:root
  permissions: '0600'
  content: |
    -----BEGIN RSA PRIVATE KEY-----
    -----END RSA PRIVATE KEY-----
- path: /etc/kubernetes/pki/sa.pub
  owner: root:root
  permissions: '0640'
  content: |
    -----BEGIN PUBLIC KEY-----
    -----END PUBLIC KEY-----
- path: /etc/kubernetes/pki/sa.key
  owner: root:root
  permissions: '0600'
  content: |
    -----BEGIN RSA PRIVATE KEY-----
    -----END RSA PRIVATE KEY-----
# Encryption-at-rest configuration for the kube-apiserver (Secrets only).
- path: /etc/kubernetes/encryption-provider.yaml
  owner: root:root
  permissions: '0600'
  content: |
    apiVersion: apiserver.config.k8s.io/v1
    kind: EncryptionConfiguration
    resources:
    - resources:
      - secrets
      providers:
      - aescbc:
          keys:
          - name: key1
            # TODO Generate this on the system
            # NOTE(review): aescbc requires a base64-encoded 16-, 24-, or
            # 32-byte key; "aaaaaaaa" is a placeholder the apiserver will
            # reject — generate with: head -c 32 /dev/urandom | base64
            secret: aaaaaaaa
      # identity (plaintext) fallback allows reading pre-existing secrets.
      - identity: {}
# Admission configuration wiring the EventRateLimit plugin to its config file.
- path: /etc/kubernetes/admission-control-config.yaml
  owner: root:root
  permissions: '0600'
  content: |
    apiVersion: apiserver.config.k8s.io/v1
    kind: AdmissionConfiguration
    plugins:
    - name: EventRateLimit
      path: /etc/kubernetes/event-rate-limit-config.yaml
# Per-namespace and per-user rate limits for Event objects.
- path: /etc/kubernetes/event-rate-limit-config.yaml
  owner: root:root
  permissions: '0600'
  content: |
    apiVersion: eventratelimit.admission.k8s.io/v1alpha1
    kind: Configuration
    limits:
    - type: Namespace
      qps: 50
      burst: 100
      cacheSize: 2000
    - type: User
      qps: 10
      burst: 50
# API-server audit policy. Rules are evaluated top-down; first match wins.
- path: /etc/kubernetes/audit-policy.yaml
  owner: root:root
  permissions: '0600'
  content: |
    # The following audit policy is based on three sources from upstream:
    # - the kubernetes docs example: https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/audit/audit-policy.yaml
    # - the GCE reference policy: https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh#L784
    # - UK Ministry of Justice policy: https://github.com/ministryofjustice/cloud-platform-infrastructure/blob/master/kops/live-1.yaml
    #
    # NOTE(review): audit.k8s.io/v1beta1 is deprecated upstream in favor of
    # audit.k8s.io/v1 (removed in k8s 1.24) — confirm against the target
    # cluster version before bumping.
    apiVersion: audit.k8s.io/v1beta1
    kind: Policy
    omitStages:
    - "RequestReceived"
    rules:
    # The following requests were manually identified as high-volume and low-risk,
    # so drop them.
    - level: None
      users: ["system:kube-proxy"]
      verbs: ["watch"]
      resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
    - level: None
      namespaces: ["ingress-controllers"]
      verbs: ["get"]
      resources:
      - group: "" # core
        resources: ["configmaps"]
        resourceNames: ["ingress-controller-leader-nginx"]
    - level: None
      users: ["kubelet"] # legacy kubelet identity
      verbs: ["get"]
      resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
    - level: None
      userGroups: ["system:nodes"]
      verbs: ["get"]
      resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
    - level: None
      users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
      verbs: ["get", "update"]
      namespaces: ["kube-system"]
      resources:
      - group: "" # core
        resources: ["endpoints"]
    - level: None
      users: ["system:apiserver"]
      verbs: ["get"]
      resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
    # Don't log HPA fetching metrics.
    - level: None
      users:
      - system:kube-controller-manager
      verbs: ["get", "list"]
      resources:
      - group: "metrics.k8s.io"
    # Don't log these read-only URLs.
    - level: None
      nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
    # Don't log authenticated requests to certain non-resource URL paths.
    - level: None
      userGroups: ["system:authenticated"]
      nonResourceURLs:
      - "/api*"
    # Don't log events requests.
    - level: None
      resources:
      - group: "" # core
        resources: ["events"]
    # Log "pods/log", "pods/status" at Metadata level
    - level: Metadata
      resources:
      - group: ""
        resources: ["pods/log", "pods/status"]
    # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
    - level: Request
      users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
      verbs: ["update","patch"]
      resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    - level: Request
      userGroups: ["system:nodes"]
      verbs: ["update","patch"]
      resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    # deletecollection calls can be large, don't log responses for expected namespace deletions
    - level: Request
      users: ["system:serviceaccount:kube-system:namespace-controller"]
      verbs: ["deletecollection"]
    # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
    # so only log at the Metadata level.
    - level: Metadata
      resources:
      - group: "" # core
        resources: ["secrets", "configmaps"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    # Get responses can be large; skip them.
    - level: Request
      verbs: ["get", "list", "watch"]
      resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    # Default level for known APIs
    - level: RequestResponse
      resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    # Default level for all other requests.
    - level: Metadata
      omitStages:
      - "RequestReceived"
# Kernel settings the kubelet's protect-kernel-defaults flag expects
# (see kubeletExtraArgs below); applied by the sysctl runcmd at the end.
- path: /etc/sysctl.d/90-kubelet.conf
  owner: root:root
  content: |
    vm.overcommit_memory=1
    kernel.panic=10
    kernel.panic_on_oops=1
# Static pod: kube-vip provides a floating virtual IP (172.22.132.226) for the
# control-plane endpoint via ARP + leader election.
- path: /etc/kubernetes/manifests/kube-vip.yaml
  owner: root:root
  content: |
    apiVersion: v1
    kind: Pod
    metadata:
      creationTimestamp: null
      name: kube-vip
      namespace: kube-system
    spec:
      containers:
      - args:
        - start
        env:
        - name: vip_arp
          value: "true"
        - name: vip_leaderelection
          value: "true"
        - name: vip_address
          value: 172.22.132.226
        - name: vip_interface
          value: eth0
        - name: vip_leaseduration
          value: "15"
        - name: vip_renewdeadline
          value: "10"
        - name: vip_retryperiod
          value: "2"
        # NOTE(review): plndr/kube-vip:0.1.8 is an old image/registry name —
        # consider ghcr.io/kube-vip/kube-vip when upgrading.
        image: plndr/kube-vip:0.1.8
        imagePullPolicy: IfNotPresent
        name: kube-vip
        resources: {}
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - SYS_TIME
        volumeMounts:
        - mountPath: /etc/kubernetes/admin.conf
          name: kubeconfig
      hostNetwork: true
      volumes:
      - hostPath:
          path: /etc/kubernetes/admin.conf
          type: FileOrCreate
        name: kubeconfig
    status: {}
# Control-plane join script run by the final runcmd entry. Wraps each kubeadm
# join phase in retry/fail-fast helpers and cleans up on fatal errors.
- path: /usr/local/bin/kubeadm-bootstrap-script
  owner: root
  permissions: '0755'
  content: |
    #!/bin/bash
    # Copyright 2020 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    # Log an error and exit.
    # Args:
    #   $1 Message to log with the error
    #   $2 The error code to return
    log::error_exit() {
      local message="${1}"
      local code="${2}"
      log::error "${message}"
      # Best-effort cleanup so a failed join can be retried from a clean state.
      log::info "Removing member from cluster status"
      kubeadm reset -f update-cluster-status || true
      log::info "Removing etcd member"
      kubeadm reset -f remove-etcd-member || true
      log::info "Resetting kubeadm"
      kubeadm reset -f || true
      log::error "cluster.x-k8s.io kubeadm bootstrap script $0 exiting with status ${code}"
      exit "${code}"
    }

    log::success_exit() {
      log::info "cluster.x-k8s.io kubeadm bootstrap script $0 finished"
      exit 0
    }

    # Log an error but keep going.
    log::error() {
      local message="${1}"
      timestamp=$(date --iso-8601=seconds)
      echo "!!! [${timestamp}] ${1}" >&2
      shift
      for message; do
        echo " ${message}" >&2
      done
    }

    # Print a status line. Formatted to show up in a stream of output.
    log::info() {
      timestamp=$(date --iso-8601=seconds)
      echo "+++ [${timestamp}] ${1}"
      shift
      for message; do
        echo " ${message}"
      done
    }

    # Translate a kubeadm exit code into a log line; code 3 (validation
    # error) is fatal and aborts the bootstrap immediately.
    check_kubeadm_command() {
      local command="${1}"
      local code="${2}"
      case ${code} in
      "0")
        log::info "kubeadm reported successful execution for ${command}"
        ;;
      "1")
        log::error "kubeadm reported failed action(s) for ${command}"
        ;;
      "2")
        log::error "kubeadm reported preflight check error during ${command}"
        ;;
      "3")
        log::error_exit "kubeadm reported validation error for ${command}" "${code}"
        ;;
      *)
        log::error "kubeadm reported unknown error ${code} for ${command}"
        ;;
      esac
    }

    # Run "$@" (with the shared join config appended) up to 5 times, 15s
    # apart, stopping early on success (0) or a preflight-only error (2).
    function retry-command() {
      n=0
      local kubeadm_return
      until [ $n -ge 5 ]; do
        log::info "running '$*'"
        # shellcheck disable=SC1083
        "$@" --config=/run/kubeadm/kubeadm-join-config.yaml
        kubeadm_return=$?
        check_kubeadm_command "'$*'" "${kubeadm_return}"
        if [ ${kubeadm_return} -eq 0 ]; then
          break
        fi
        # We allow preflight errors to pass
        if [ ${kubeadm_return} -eq 2 ]; then
          break
        fi
        n=$((n + 1))
        sleep 15
      done
      # BUGFIX(review): the loop deliberately tolerates exit code 2
      # ("We allow preflight errors to pass"), but the original final check
      # treated 2 as fatal, contradicting that intent — let 2 through.
      if [ ${kubeadm_return} -ne 0 ] && [ ${kubeadm_return} -ne 2 ]; then
        log::error_exit "too many errors, exiting" "${kubeadm_return}"
      fi
    }

    # Run "$@" once; any non-zero exit status aborts the bootstrap.
    function try-or-die-command() {
      local kubeadm_return
      log::info "running '$*'"
      # shellcheck disable=SC1083
      "$@" --config=/run/kubeadm/kubeadm-join-config.yaml
      kubeadm_return=$?
      check_kubeadm_command "'$*'" "${kubeadm_return}"
      if [ ${kubeadm_return} -ne 0 ]; then
        log::error_exit "fatal error, exiting" "${kubeadm_return}"
      fi
    }

    retry-command kubeadm join phase preflight --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests

    retry-command kubeadm join phase control-plane-prepare download-certs
    retry-command kubeadm join phase control-plane-prepare certs
    retry-command kubeadm join phase control-plane-prepare kubeconfig
    retry-command kubeadm join phase control-plane-prepare control-plane

    retry-command kubeadm join phase kubelet-start

    # etcd join is not retried: a partial etcd join must not be re-run blindly.
    try-or-die-command kubeadm join phase control-plane-join etcd
    retry-command kubeadm join phase control-plane-join update-status
    retry-command kubeadm join phase control-plane-join mark-control-plane

    log::success_exit
# JoinConfiguration consumed by every kubeadm invocation in the bootstrap
# script (--config=/run/kubeadm/kubeadm-join-config.yaml).
- path: /run/kubeadm/kubeadm-join-config.yaml
  owner: root:root
  permissions: '0640'
  content: |
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: JoinConfiguration
    controlPlane:
      localAPIEndpoint: {}
    discovery:
      bootstrapToken:
        # NOTE(review): 172.0.0.1 looks like a typo (cf. the kube-vip VIP
        # 172.22.132.226 above) — confirm the intended API endpoint.
        apiServerEndpoint: 172.0.0.1:6443
        caCertHashes:
        - sha256:523a3c9edf160b5ead4cfb1f35a1859d7f6775ea8263be2c3393eda67dc3a446
        # NOTE(review): placeholder/redacted bootstrap token.
        token: pqz49f.ssssss
    nodeRegistration:
      criSocket: /var/run/containerd/containerd.sock
      kubeletExtraArgs:
        cloud-provider: external
        container-log-max-files: "5"
        container-log-max-size: 10Mi
        event-qps: "0"
        protect-kernel-defaults: "true"
        tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
      # Node name comes from the cloud-init datasource (jinja-templated);
      # sibling of kubeletExtraArgs, not a kubelet flag.
      name: '{{ ds.meta_data.instance_id }}'
# Writing this file forces creation of /run/cluster-api on both Linux and
# Windows (where "mkdir -p" is unavailable).
- path: /run/cluster-api/placeholder
  owner: root:root
  permissions: '0640'
  content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
# Boot sequence: apply sysctls, fix hostname/hosts entries, then run the
# kubeadm bootstrap script and record success for the sentinel file check.
runcmd:
- "sysctl -p /etc/sysctl.d/90-kubelet.conf"
- "hostname \"{{ ds.meta_data.instance_id }}\""
- "echo \"::1 ipv6-localhost ipv6-loopback\" >/etc/hosts"
- "echo \"127.0.0.1 localhost\" >>/etc/hosts"
- "echo \"127.0.0.1 {{ ds.meta_data.instance_id }}\" >>/etc/hosts"
- "echo \"10.27.50.250 harbor.sce-dcn.net\" >>/etc/hosts"
- "echo \"{{ ds.meta_data.instance_id }}\" >/etc/hostname"
- /usr/local/bin/kubeadm-bootstrap-script && echo success > /run/cluster-api/bootstrap-success.complete
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment