Skip to content

Instantly share code, notes, and snippets.

@jacobweinstock
Last active July 17, 2023 21:23
Show Gist options
  • Save jacobweinstock/2d331e81e5a9d2ed74a9e3b4eae869cd to your computer and use it in GitHub Desktop.
Tinkerbell with Cloud init (optionally install a single node Kubernetes cluster)
---
# Tinkerbell Hardware object for the HP demo machine.
# Defines the target disk, the cloud-init userData served by Hegel,
# and the DHCP/netboot configuration for the machine's NIC.
apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
metadata:
  name: hp-demo
  namespace: tink-system
spec:
  disks:
    - device: /dev/nvme0n1
  # Literal block scalar (|) preserves the cloud-config line structure;
  # a folded scalar (>-) would join same-indent lines into one line and
  # produce invalid cloud-config.
  userData: |
    #cloud-config
    package_update: true
    users:
      - name: tink
        sudo: ['ALL=(ALL) NOPASSWD:ALL']
        shell: /bin/bash
        plain_text_passwd: 'tink'
        lock_passwd: false
        ssh_authorized_keys:
          - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDW7BP54hSp3TrQjQq7O+oprZdXH8zbKBww/YJyCD9ksM/Y3BiFaCDwzN/vcRSslkn0kJDUq7TxmKp9bEZLTXqAiRe7GflNGoiAUuNY9EWnxt305HIkBs+OEdV6KDtnlm9sRAADflzbDi6YiMjbwNcfoRoxTgpo6BNlzv9Y3prDXiwEjxvosK+4WWIVTTEh33nNvQ5iQhPqBNgURmjQx9EDXFIRdZzA8OykPNLIqFdzmxGZWWxFbW/n6nEl/96b6w7Gx0YgzTSLs+6WAQl8SMP9l22L6puitpjihRw9cWRJ9r6x1eLqgc5Sv7gDKOMXghbmS6hy+AtrxCPPJgq7Mguc5bPAqTZlYMy98dxpHVqtAnBso/9aLOzAXX6At/0QUIwMP693B11NTGniIMtBxnD/yWvGoxTXNmXcTvj13cTzSv9czaGSJ+MTRIugtgyouZADfs8v59NV9KoaEq8umy6WEhmtw5wkjzvC5KK4N2bsM1N+8lSIKxYWxWZFsdYBP8ep442Z/2T5R8y8c5cp7tQqqapDt8JPJ0OPq3sn30BO3X8MgvmoB39j4Cqok1y9VuouPH4RalRLMR7KrASdlFengjt0vWBUoNaEuxRdJR2eOM6SpZh6YGqLdQH1MLaBOzDTH2tTLyTXCOSJpve6ZHOPbjS2BF34a1Kj52NTFtiYTw==
    packages:
      - openssl
    runcmd:
      - sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
      - systemctl enable ssh.service
      - systemctl start ssh.service
      - systemctl disable apparmor
      - systemctl disable snapd
      - rm -f /etc/hostname
      # uncomment this line below to install a single node kubeadm kubernetes cluster.
      # - curl https://gist.githubusercontent.com/jacobweinstock/2d331e81e5a9d2ed74a9e3b4eae869cd/raw/install.sh | bash -x -- 2>&1 | tee /var/log/kubeadm-install.log
  metadata:
    facility:
      facility_code: onprem
    manufacturer:
      slug: hp
    instance:
      userdata: ""
      hostname: hp-demo
      # Quoted: colon-separated MAC-style scalars are a YAML implicit-typing trap.
      id: "f8:b4:6a:ab:8d:40"
      operating_system:
        distro: ubuntu
        os_slug: ubuntu_22_04
        # Quoted so 22.04 stays a string rather than a float.
        version: "22.04"
  interfaces:
    - dhcp:
        arch: x86_64
        hostname: hp-demo
        ip:
          address: 192.168.2.147
          gateway: 192.168.2.1
          netmask: 255.255.255.0
        # Lease time in seconds (24h).
        lease_time: 86400
        mac: "f8:b4:6a:ab:8d:40"
        name_servers:
          - 1.1.1.1
          - 8.8.8.8
        uefi: true
      netboot:
        allowPXE: true
        allowWorkflow: true
#!/bin/bash
# Create a single-node kubeadm Kubernetes cluster on Ubuntu 22.04.
# Must be run as root.
# based on https://blog.radwell.codes/2022/07/single-node-kubernetes-cluster-via-kubeadm-on-ubuntu-22-04/
#
# -e: exit on error; -u: error on unset vars; -o pipefail: a failing
# command inside a pipeline (curl | gpg, wget | tar) fails the script
# instead of being masked by the last command's exit status.
set -euo pipefail

# Prerequisites
apt update
# kmod is needed when running in a container with podman
apt install -y apt-transport-https ca-certificates curl gpg kmod

# Install containerd
mkdir -p /etc/containerd
cat <<EOF> /etc/containerd/config.toml
version = 2
[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    sandbox_image = "registry.k8s.io/pause:3.9"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
      runtime_type = "io.containerd.runc.v2"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
        SystemdCgroup = true
EOF
CONTAINERD_VERSION=1.7.2
CONTAINERD_ARCH=linux-amd64
CONTAINERD_DOWNLOAD_URL=https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-${CONTAINERD_ARCH}.tar.gz
curl -fLo containerd.tar.gz ${CONTAINERD_DOWNLOAD_URL}
# Extract the binaries
tar Cxzvf /usr/local containerd.tar.gz
rm -f containerd.tar.gz
# Install containerd as a service
CONTAINERD_SYSTEMD_SERVICE_URL=https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
curl -fsSLo /etc/systemd/system/containerd.service ${CONTAINERD_SYSTEMD_SERVICE_URL}
systemctl daemon-reload
systemctl enable --now containerd

# Install runc
RUNC_VERSION=1.1.7
RUNC_ARCH=amd64
RUNC_DOWNLOAD_URL=https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.${RUNC_ARCH}
curl -fsSLo runc.amd64 ${RUNC_DOWNLOAD_URL}
install -m 755 runc.amd64 /usr/local/sbin/runc
rm -f runc.amd64

# Install CNI plugins
CNI_VERSION=1.3.0
CNI_ARCH=linux-amd64
CNI_DOWNLOAD_URL=https://github.com/containernetworking/plugins/releases/download/v${CNI_VERSION}/cni-plugins-${CNI_ARCH}-v${CNI_VERSION}.tgz
curl -fLo cni-plugins.tgz ${CNI_DOWNLOAD_URL}
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins.tgz
rm -f cni-plugins.tgz

# Forward IPv4 and let iptables see bridged network traffic
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe -a overlay br_netfilter
# sysctl params required by setup, params persist across reboots
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
# Apply sysctl params without reboot
sysctl --system

# Install kubeadm, kubelet & kubectl
# This will install the latest version of kubernetes.
# /etc/apt/keyrings is not guaranteed to exist on Ubuntu 22.04; create it
# before writing the keyring or the curl below fails.
mkdir -p /etc/apt/keyrings
# NOTE(review): apt.kubernetes.io / packages.cloud.google.com is the legacy
# Kubernetes apt repo and has been deprecated in favor of pkgs.k8s.io;
# migrate when updating this script.
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install -y kubelet kubeadm kubectl
# Hold the packages so unattended upgrades can't skew the cluster version.
apt-mark hold kubelet kubeadm kubectl

# Ensure swap is disabled (kubelet refuses to start with swap on)
swapon --show
swapoff -a
# Disable swap completely across reboots
sed -i -e '/swap/d' /etc/fstab

# Create the kubernetes cluster.
# DEFAULT_IP: the source IP of the default route, added to the API server cert SANs.
DEFAULT_IP=$(ip -o route get to 8.8.8.8 | sed -n 's/.*src \([0-9.]\+\).*/\1/p')
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-cert-extra-sans=${DEFAULT_IP}

# Configure kubectl for root and for a HOME-less invocation (e.g. cloud-init)
mkdir -p /root/.kube /.kube
cp -af /etc/kubernetes/admin.conf /root/.kube/config
cp -af /etc/kubernetes/admin.conf /.kube/config
chown $(id -u):$(id -g) /root/.kube/config
chown $(id -u):$(id -g) /.kube/config

# Untaint the single node so workloads can schedule on the control plane
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl label nodes --all node.kubernetes.io/exclude-from-external-load-balancers-

# Install a CNI implementation: kube-router (latest version)
kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml

# Install a load balancer and cloud controller so Kubernetes services can
# request service.spec.type: LoadBalancer
# install kubevip cloud controller
kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
# create a config map with the range of IPs available to be assigned
kubectl create configmap -n kube-system kubevip --from-literal range-global=192.168.2.220-192.168.2.224
# install kubevip load balancer service
kubectl apply -f https://gist.githubusercontent.com/jacobweinstock/2d331e81e5a9d2ed74a9e3b4eae869cd/raw/kubevip.yaml
rm -rf /.kube/

# Troubleshooting tools: nerdctl (docker-like CLI for containerd)
NERDCTL_VERSION=1.4.0
NERDCTL_ARCH=linux-amd64
wget https://github.com/containerd/nerdctl/releases/download/v${NERDCTL_VERSION}/nerdctl-${NERDCTL_VERSION}-${NERDCTL_ARCH}.tar.gz -O - | tar -zxv -C /usr/local/bin nerdctl
---
# kube-vip DaemonSet: runs the kube-vip manager on every node (host network)
# to provide LoadBalancer-type service VIPs via ARP.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubevip
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kubevip
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kubevip
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kubevip
    spec:
      containers:
        - args:
            - manager
          env:
            # Values are quoted strings on purpose: kube-vip reads env vars
            # as strings, and the k8s API requires string env values.
            - name: vip_arp
              value: "true"
            - name: svc_enable
              value: "true"
            - name: svc_election
              value: "true"
            - name: enableServicesElection
              value: "true"
          image: ghcr.io/kube-vip/kube-vip:v0.5.7
          imagePullPolicy: IfNotPresent
          name: kubevip
          securityContext:
            capabilities:
              add:
                # Needed to manage VIPs and send gratuitous ARP on the host NIC.
                - NET_ADMIN
                - NET_RAW
      hostNetwork: true
      serviceAccountName: kubevip
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubevip
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  name: kube-vip-role
rules:
  - apiGroups: [""]
    resources:
      - "services"
      - "services/status"
      - "nodes"
      - "endpoints"
    verbs:
      - "list"
      - "get"
      - "watch"
      - "update"
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - "leases"
    verbs:
      - "list"
      - "get"
      - "watch"
      - "update"
      - "create"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-vip-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-vip-role
subjects:
  - kind: ServiceAccount
    name: kubevip
    namespace: kube-system
---
# Tinkerbell Template: streams an Ubuntu 22.04 cloud image to disk,
# drops cloud-init datasource/netplan config, creates a debug user,
# then kexecs into the installed OS.
apiVersion: "tinkerbell.org/v1alpha1"
kind: Template
metadata:
  name: ubuntu-22-04
  namespace: tink-system
spec:
  # The data block is a Go-templated workflow definition; it is rendered
  # (e.g. {{ .device_1 }}) before being parsed as YAML by Tink.
  data: |
    version: "0.1"
    name: ubuntu_22_04
    global_timeout: 9800
    tasks:
      - name: "os-installation"
        worker: "{{.device_1}}"
        volumes:
          - /dev:/dev
          - /dev/console:/dev/console
          - /lib/firmware:/lib/firmware:ro
        actions:
          - name: "stream-ubuntu-image"
            image: quay.io/tinkerbell-actions/image2disk:v1.0.0
            timeout: 9600
            environment:
              DEST_DISK: {{ index .Hardware.Disks 0 }}
              IMG_URL: "http://{{ .artifact_server_ip_port }}/jammy-server-cloudimg-amd64.raw.gz"
              COMPRESSED: true
          - name: "add-cloud-init-config"
            image: quay.io/tinkerbell-actions/writefile:v1.0.0
            timeout: 90
            environment:
              # Point cloud-init's Ec2 datasource at the Hegel metadata service.
              CONTENTS: |
                datasource:
                  Ec2:
                    metadata_urls: ["http://{{ .hegel_ip_port }}"]
                    strict_id: false
                manage_etc_hosts: localhost
                warnings:
                  dsid_missing_source: off
              DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }}
              DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg
              DIRMODE: "0700"
              FS_TYPE: ext4
              GID: "0"
              MODE: "0600"
              UID: "0"
          - name: "add-tink-cloud-init-ds-config"
            image: quay.io/tinkerbell-actions/writefile:v1.0.0
            timeout: 90
            environment:
              DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }}
              FS_TYPE: ext4
              DEST_PATH: /etc/cloud/ds-identify.cfg
              # Modes/ids quoted: unquoted 0600 is parsed as octal int 384
              # under YAML 1.1 implicit typing, not the string "0600".
              UID: "0"
              GID: "0"
              MODE: "0600"
              DIRMODE: "0700"
              CONTENTS: |
                datasource: Ec2
          - name: "write-netplan"
            image: quay.io/tinkerbell-actions/writefile:v1.0.0
            timeout: 90
            environment:
              DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }}
              FS_TYPE: ext4
              DEST_PATH: /etc/netplan/config.yaml
              CONTENTS: |
                network:
                  version: 2
                  renderer: networkd
                  ethernets:
                    id0:
                      match:
                        name: en*
                      dhcp4: true
              UID: "0"
              GID: "0"
              MODE: "0644"
              DIRMODE: "0755"
          - name: "create-debugging-user"
            image: quay.io/tinkerbell-actions/cexec:v1.0.0
            timeout: 90
            environment:
              BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }}
              FS_TYPE: ext4
              CHROOT: y
              DEFAULT_INTERPRETER: "/bin/sh -c"
              CMD_LINE: "useradd -p $(openssl passwd -1 debug) -s /bin/bash -d /home/debug/ -m -G sudo debug"
          - name: "kexec-into-installed-os"
            image: quay.io/tinkerbell-actions/kexec:v1.0.0
            timeout: 90
            pid: host
            environment:
              BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }}
              FS_TYPE: ext4
---
# Tinkerbell Workflow: binds the ubuntu-22-04 Template to a Hardware object
# and supplies the template's substitution values.
apiVersion: "tinkerbell.org/v1alpha1"
kind: Workflow
metadata:
  name: hp01
  namespace: tink-system
spec:
  templateRef: ubuntu-22-04
  # NOTE(review): the Hardware object above is named hp-demo, not hp01 —
  # confirm which name is correct; these must match for the workflow to run.
  hardwareRef: hp01
  hardwareMap:
    # Quoted: colon-separated scalars (MAC addresses, ip:port) are
    # YAML implicit-typing traps and must stay strings.
    device_1: "f8:b4:6a:ab:8d:40"
    hegel_ip_port: "192.168.2.88:50061"
    artifact_server_ip_port: "192.168.2.88:8080"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment