Skip to content

Instantly share code, notes, and snippets.

@jkremser
Created March 30, 2023 09:58
Show Gist options
  • Save jkremser/7fb07237a9c75a81cb03dd87ee181b13 to your computer and use it in GitHub Desktop.
---
# Source: cluster-vsphere/templates/cluster.yaml
# Cluster API root object: ties the control plane (KubeadmControlPlane) and the
# infrastructure (VSphereCluster) together under the cluster name "gjirk25".
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: gjirk25
  namespace: default
  annotations:
    cluster.giantswarm.io/description: "test cluster"
  labels:
    cluster-apps-operator.giantswarm.io/watching: ""
    giantswarm.io/service-priority: "highest"
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 192.168.0.0/16
    # services CIDR intentionally left unset (cluster default applies):
    # services:
    #   cidrBlocks:
    #     - 192.168.1.0/16
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: gjirk25
    namespace: default
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: VSphereCluster
    name: gjirk25
    namespace: default
---
# Source: cluster-vsphere/templates/kubeadmconfigtemplate.yaml
# Bootstrap template for worker nodes: kubelet flags, SSH hardening files, and
# host-name setup commands run by cloud-init before/after `kubeadm join`.
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: gjirk25-worker-8ac3116f
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  template:
    spec:
      users:
        - name: giantswarm
          # Quoted: the sudoers value contains ':' and '(' which are unsafe in
          # a plain YAML scalar.
          sudo: "ALL=(ALL) NOPASSWD:ALL"
      joinConfiguration:
        nodeRegistration:
          criSocket: /run/containerd/containerd.sock
          kubeletExtraArgs:
            cloud-provider: external
            feature-gates: "ExpandPersistentVolumes=true"
            eviction-hard: "memory.available<200Mi"
            eviction-max-pod-grace-period: "60"
            eviction-soft: "memory.available<500Mi"
            eviction-soft-grace-period: "memory.available=5s"
            anonymous-auth: "true"
            node-labels: "giantswarm.io/node-pool=worker"
      files:
        - path: /etc/ssh/trusted-user-ca-keys.pem
          permissions: "0600"
          content: |
            ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM4cvZ01fLmO9cJbWUj7sfF+NhECgy+Cl0bazSrZX7sU vault-ca@vault.operations.giantswarm.io
        - path: /etc/ssh/sshd_config
          permissions: "0600"
          content: |
            # Use most defaults for sshd configuration.
            Subsystem sftp internal-sftp
            ClientAliveInterval 180
            UseDNS no
            UsePAM yes
            PrintLastLog no # handled by PAM
            PrintMotd no # handled by PAM
            # Non defaults (#100)
            ClientAliveCountMax 2
            PasswordAuthentication no
            TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem
            MaxAuthTries 5
            LoginGraceTime 60
            AllowTcpForwarding no
            AllowAgentForwarding no
      preKubeadmCommands:
        # '{{ ds.meta_data.hostname }}' is a cloud-init datasource template,
        # substituted on the node at boot — not Helm templating.
        - hostname '{{ ds.meta_data.hostname }}'
        - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
        - echo "127.0.0.1 localhost" >>/etc/hosts
        - echo '127.0.0.1 {{ ds.meta_data.hostname }}' >>/etc/hosts
        - echo '{{ ds.meta_data.hostname }}' >/etc/hostname
      postKubeadmCommands:
        - systemctl restart sshd
---
# Source: cluster-vsphere/templates/kubeadmcontrolplane.yaml
# TODO: this
# Control-plane definition: kubeadm cluster/init/join configuration, static
# files written to control-plane nodes (SSH hardening, kubeadm patches,
# kube-vip static pod), and the machine template reference.
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: gjirk25
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        certSANs:
          - localhost
          - 127.0.0.1
          - "api.gjirk25.k8s.test"
        extraArgs:
          cloud-provider: external
          enable-admission-plugins: NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,PersistentVolumeClaimResize,DefaultStorageClass,Priority,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook
          feature-gates: TTLAfterFinished=true
          kubelet-preferred-address-types: "InternalIP"
          logtostderr: "true"
          profiling: "false"
          requestheader-allowed-names: "front-proxy-client"
          runtime-config: "api/all=true"
          tls-cipher-suites: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
        # Rendered empty by the chart; explicit [] instead of a bare key
        # (a bare key parses as null, not an empty list).
        extraVolumes: []
      controllerManager:
        extraArgs:
          authorization-always-allow-paths: "/healthz,/readyz,/livez,/metrics"
          bind-address: "0.0.0.0"
          cloud-provider: external
          enable-hostpath-provisioner: "true"
          feature-gates: ExpandPersistentVolumes=true,TTLAfterFinished=true
          logtostderr: "true"
          profiling: "false"
      dns:
        imageRepository: projects.registry.vmware.com/tkg
        imageTag: v1.7.0_vmware.12
      etcd:
        local:
          extraArgs:
            listen-metrics-urls: "http://0.0.0.0:2381"
          imageRepository: giantswarm
          imageTag: 3.5.4-0-k8s
      imageRepository: projects.registry.vmware.com/tkg
    users:
      - name: giantswarm
        # Quoted: the sudoers value contains ':' and '(' which are unsafe in
        # a plain YAML scalar.
        sudo: "ALL=(ALL) NOPASSWD:ALL"
    initConfiguration:
      # kube-proxy is skipped here; presumably replaced by another dataplane
      # (e.g. Cilium) installed separately — confirm against the chart.
      skipPhases:
        - addon/kube-proxy
      patches:
        directory: "/tmp/kubeadm/patches"
      nodeRegistration:
        criSocket: /run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
          feature-gates: "ExpandPersistentVolumes=true"
          eviction-hard: "memory.available<200Mi"
          eviction-max-pod-grace-period: "60"
          eviction-soft: "memory.available<500Mi"
          eviction-soft-grace-period: "memory.available=5s"
          anonymous-auth: "true"
    joinConfiguration:
      patches:
        directory: "/tmp/kubeadm/patches"
      nodeRegistration:
        criSocket: /run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
          feature-gates: "ExpandPersistentVolumes=true"
          eviction-hard: "memory.available<200Mi"
          eviction-max-pod-grace-period: "60"
          eviction-soft: "memory.available<500Mi"
          eviction-soft-grace-period: "memory.available=5s"
          anonymous-auth: "true"
    files:
      - path: /etc/ssh/trusted-user-ca-keys.pem
        permissions: "0600"
        content: |
          ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM4cvZ01fLmO9cJbWUj7sfF+NhECgy+Cl0bazSrZX7sU vault-ca@vault.operations.giantswarm.io
      - path: /etc/ssh/sshd_config
        permissions: "0600"
        content: |
          # Use most defaults for sshd configuration.
          Subsystem sftp internal-sftp
          ClientAliveInterval 180
          UseDNS no
          UsePAM yes
          PrintLastLog no # handled by PAM
          PrintMotd no # handled by PAM
          # Non defaults (#100)
          ClientAliveCountMax 2
          PasswordAuthentication no
          TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem
          MaxAuthTries 5
          LoginGraceTime 60
          AllowTcpForwarding no
          AllowAgentForwarding no
      # kubeadm JSON-patch template; $VARS are substituted by the patch
      # script below via envsubst, not by Helm.
      - path: /tmp/kubeadm/patches/kube-apiserver+json.tpl
        content: |-
          [
            {
              "op": "add",
              "path": "/spec/dnsPolicy",
              "value": "ClusterFirstWithHostNet"
            },
            {
              "op": "add",
              "path": "/spec/containers/0/command/-",
              "value": "--max-requests-inflight=$MAX_REQUESTS_INFLIGHT"
            },
            {
              "op": "add",
              "path": "/spec/containers/0/command/-",
              "value": "--max-mutating-requests-inflight=$MAX_MUTATING_REQUESTS_INFLIGHT"
            },
            {
              "op": "replace",
              "path": "/spec/containers/0/resources/requests/cpu",
              "value": "$API_SERVER_CPU_REQUEST"
            },
            {
              "op": "replace",
              "path": "/spec/containers/0/resources/requests/memory",
              "value": "$API_SERVER_MEMORY_REQUEST"
            }
          ]
      - path: /tmp/kubeadm/patches/kube-apiserver-patch.sh
        content: |-
          #!/usr/bin/env bash
          #
          # Creates kube-apiserver+json.json file by replacing
          # environment variables in kube-apiserver+json.tpl
          #
          set -o errexit
          set -o nounset
          set -o pipefail
          set -x
          if [ "$#" -ne 1 ]; then
            echo "Illegal number of parameters" >&2
            echo "Usage: bash kube-apiserver-patch.sh <resource-ratio>" >&2
            exit 1
          fi
          ratio=$1
          cpus="$(grep -c ^processor /proc/cpuinfo)"
          memory="$(awk '/MemTotal/ { printf "%d \n", $2/1024 }' /proc/meminfo)"
          export MAX_REQUESTS_INFLIGHT=$((cpus*(1600/ratio)))
          export MAX_MUTATING_REQUESTS_INFLIGHT=$((cpus*(800/ratio)))
          export API_SERVER_CPU_REQUEST=$((cpus*(1000/ratio)))m
          export API_SERVER_MEMORY_REQUEST=$((memory/ratio))Mi
          envsubst < "/tmp/kubeadm/patches/kube-apiserver+json.tpl" > "/tmp/kubeadm/patches/kube-apiserver+json.json"
      # kube-vip static pod: advertises the control-plane VIP 10.10.222.242.
      - content: |
          apiVersion: v1
          kind: Pod
          metadata:
            creationTimestamp: null
            name: kube-vip
            namespace: kube-system
          spec:
            containers:
              - args:
                  - start
                env:
                  - name: vip_arp
                    value: "true"
                  - name: vip_leaderelection
                    value: "true"
                  - name: vip_address
                    value: 10.10.222.242
                  - name: vip_interface
                    value: eth0
                  - name: vip_leaseduration
                    value: "15"
                  - name: vip_renewdeadline
                    value: "10"
                  - name: vip_retryperiod
                    value: "2"
                image: ghcr.io/kube-vip/kube-vip:v0.3.5
                imagePullPolicy: IfNotPresent
                name: kube-vip
                resources: {}
                securityContext:
                  capabilities:
                    add:
                      - NET_ADMIN
                      - SYS_TIME
                volumeMounts:
                  - mountPath: /etc/kubernetes/admin.conf
                    name: kubeconfig
            hostNetwork: true
            volumes:
              - hostPath:
                  path: /etc/kubernetes/admin.conf
                  type: FileOrCreate
                name: kubeconfig
          status: {}
        owner: root:root
        path: /etc/kubernetes/manifests/kube-vip.yaml
    preKubeadmCommands:
      # - bash /tmp/kubeadm/patches/kube-apiserver-patch.sh 8
      # '{{ ds.meta_data.hostname }}' is a cloud-init datasource template,
      # substituted on the node at boot — not Helm templating.
      - hostname '{{ ds.meta_data.hostname }}'
      - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
      - echo "127.0.0.1 localhost" >>/etc/hosts
      - echo '127.0.0.1 {{ ds.meta_data.hostname }}' >>/etc/hosts
      - echo '{{ ds.meta_data.hostname }}' >/etc/hostname
    postKubeadmCommands:
      - systemctl restart sshd
    useExperimentalRetryJoin: true
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: VSphereMachineTemplate
      name: gjirk25-control-plane-798e9a27
      namespace: default
  replicas: 1
  version: v1.25.6
---
# Source: cluster-vsphere/templates/machinedeployment.yaml
# Worker node pool: 2 replicas bootstrapped via the KubeadmConfigTemplate and
# backed by the "default" VSphereMachineTemplate.
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: gjirk25-worker
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  clusterName: gjirk25
  replicas: 2
  revisionHistoryLimit: 0
  selector:
    matchLabels: {}
  template:
    metadata:
      labels:
        app: "cluster-vsphere"
        app.kubernetes.io/managed-by: "Helm"
        cluster.x-k8s.io/cluster-name: "gjirk25"
        giantswarm.io/cluster: "gjirk25"
        giantswarm.io/organization: "giantswarm"
        application.giantswarm.io/team: "rocket"
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: gjirk25-worker-8ac3116f
          namespace: default
      clusterName: gjirk25
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: VSphereMachineTemplate
        name: gjirk25-default-3affd4ad
        namespace: default
      version: v1.25.6
---
# Source: cluster-vsphere/templates/vspherecluster.yaml
# vSphere infrastructure object; the control-plane endpoint matches the
# kube-vip VIP configured on the control-plane nodes.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereCluster
metadata:
  name: gjirk25
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  controlPlaneEndpoint:
    host: '10.10.222.242'
    port: 6443
  identityRef:
    kind: Secret
    name: gjirk25-credentials
  # NOTE(review): rendered empty — the chart values apparently do not set the
  # vCenter server/thumbprint; written as explicit "" rather than bare nulls.
  server: ""
  thumbprint: ""
---
# Source: cluster-vsphere/templates/vspheremachinetemplate.yaml
# VM template for control-plane machines (2 vCPU / 8 GiB / 25 GiB disk).
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
metadata:
  name: gjirk25-control-plane-798e9a27
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  template:
    spec:
      # NOTE(review): rendered empty — the chart values apparently do not set
      # these vCenter fields; written as explicit "" rather than bare nulls.
      datacenter: ""
      datastore: ""
      server: ""
      thumbprint: ""
      cloneMode: linkedClone
      diskGiB: 25
      memoryMiB: 8192
      network:
        devices:
          - dhcp4: true
            networkName: grasshopper-capv
      numCPUs: 2
      resourcePool: grasshopper
      template: ubuntu-2004-kube-v1.25.6
---
# Source: cluster-vsphere/templates/vspheremachinetemplate.yaml
# VM template for worker machines (2 vCPU / 25 GiB disk; memory left at the
# provider default — no memoryMiB is rendered for this template).
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
metadata:
  name: gjirk25-default-3affd4ad
  namespace: default
  labels:
    app: "cluster-vsphere"
    app.kubernetes.io/managed-by: "Helm"
    cluster.x-k8s.io/cluster-name: "gjirk25"
    giantswarm.io/cluster: "gjirk25"
    giantswarm.io/organization: "giantswarm"
    application.giantswarm.io/team: "rocket"
    app.kubernetes.io/version: "0.3.0"
    helm.sh/chart: "cluster-vsphere-0.3.0"
spec:
  template:
    spec:
      # NOTE(review): rendered empty — the chart values apparently do not set
      # these vCenter fields; written as explicit "" rather than bare nulls.
      datacenter: ""
      datastore: ""
      server: ""
      thumbprint: ""
      cloneMode: linkedClone
      diskGiB: 25
      network:
        devices:
          - dhcp4: true
            networkName: grasshopper-capv
      numCPUs: 2
      resourcePool: grasshopper
      template: ubuntu-2004-kube-v1.25.6
---
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment