@jianzzha
Created June 4, 2024 19:48
Steps to bring up an OCP VM using a k8s VM
1. Create the k8s cluster on the first VM
# set up the KIND bootstrap cluster:
https://github.com/jianzzha/sylva-poc/tree/main/kind-metal3-deploy
# create the BareMetalHost (BMH) resource for the k8s VM (k8s-bmh.yaml, listed below)
kubectl apply -f k8s-bmh.yaml
# install CAPI with the Metal3 infrastructure provider
clusterctl init --infrastructure metal3
# create the k8s cluster (k8s-cluster.yaml, listed below)
kubectl apply -f k8s-cluster.yaml
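# (optional) watch the BMH/Machine come up; the kubeconfig fetched in the next step is
# only usable once the control plane machine has been provisioned and booted
kubectl get bmh,machines,clusters -A -w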
# get k8s kubeconfig file
clusterctl get kubeconfig my-cluster > kubeconfig
# pivot to the new k8s cluster
export KUBECONFIG=$PWD/kubeconfig
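# (optional) to do a full CAPI pivot (move the Cluster/Metal3 objects from the KIND bootstrap
# cluster into the new cluster) rather than just switching kubeconfig, a sketch:
# clusterctl init --infrastructure metal3 --kubeconfig $PWD/kubeconfig
# clusterctl move --to-kubeconfig $PWD/kubeconfig   # run against the bootstrap cluster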
2. Create the OCP cluster on the second VM
# enable local path storage (a copy of the manifest, modified to pull the helper pod image from the local registry, is listed below)
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.27/deploy/local-path-storage.yaml
# install MetalLB and ingress-nginx (install_metallb_ingress.sh, listed below)
install_metallb_ingress.sh
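# verify the ingress controller service got the MetalLB address (192.168.222.200, which
# dnsmasq maps to assisted.com); the service name below assumes the chart's default naming
kubectl get svc -n ingress-nginx ingress-nginx-controller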
# update the IP addresses in the ironic and bmo kustomize directories to point at VM1 (192.168.222.100), for example:
#DEPLOY_KERNEL_URL=http://192.168.222.100:6180/images/ironic-python-agent.kernel
#DEPLOY_RAMDISK_URL=http://192.168.222.100:6180/images/ironic-python-agent.initramfs
#IRONIC_ENDPOINT=https://192.168.222.100:6385/v1/
#CACHEURL=http://192.168.222.100/images
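# one way to locate the settings to change (the layout of the ironic/ and bmo/ kustomize
# directories isn't shown in this gist, so adjust paths as needed):
# grep -rn -e DEPLOY_KERNEL_URL -e DEPLOY_RAMDISK_URL -e IRONIC_ENDPOINT -e CACHEURL ironic bmo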
# install ironic and bmo
kubectl apply -k ironic
kubectl apply -k bmo
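# wait for ironic and the baremetal operator to come up (namespace names depend on the
# kustomize overlays, so just grep across all namespaces)
kubectl get pods -A | grep -Ei 'ironic|baremetal'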
# update the assisted-installer helm chart PVCs to use the local-path storage class
[root@xr12-55 manifest-dev]# git diff object-storage-pvc.yaml
   accessModes:
     - ReadWriteOnce
+  storageClassName: local-path
   resources:
[root@xr12-55 manifest-dev]# git diff postgres-storage.yaml
   accessModes:
     - ReadWriteOnce
+  storageClassName: local-path
   resources:
# use helm to bring up assisted-installer
helm install installer charts/assisted-installer
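# check that the assisted-installer pods come up and both PVCs bind to the local-path storage class
kubectl get pods,pvc -A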
# bring up the SNO cluster (test-sno.yaml, listed below)
kubectl apply -f test-sno.yaml
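# track the SNO install; this assumes a BareMetalHost for the okd-sno node already exists
# in the test-capi namespace (that manifest is not included in this gist)
clusterctl describe cluster okd-sno -n test-capi
kubectl get bmh,machines -n test-capi
# once the control plane is up, pull the OCP kubeconfig
clusterctl get kubeconfig okd-sno -n test-capi > okd-sno-kubeconfig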
# dnsmasq configuration on the VM host (DNS/DHCP/PXE for the 192.168.222.0/24 lab network)
listen-address=192.168.222.1
bind-interfaces
strict-order
local=/lab.home/
domain=lab.home
expand-hosts
except-interface=lo
except-interface=eno8303np0
interface=baremetal
server=8.8.8.8
dhcp-range=192.168.222.20,192.168.222.100
dhcp-no-override
dhcp-authoritative
dhcp-lease-max=41
dhcp-host=00:60:2f:31:81:01,192.168.222.30,okd-sno
dhcp-host=00:60:2f:31:81:aa,192.168.222.100,k8s
dhcp-host=00:60:2f:31:81:bb,192.168.222.101,k8s-b
dhcp-option=42,0.0.0.0
enable-tftp
tftp-root=/var/lib/tftpboot
dhcp-vendorclass=BIOS,PXEClient:Arch:00000
dhcp-vendorclass=efi64,PXEClient:Arch:00007
dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
dhcp-boot=tag:BIOS,undionly.kpxe
dhcp-boot=tag:efi64,ipxe.efi
dhcp-boot=tag:ipxe,http://192.168.222.1/bootstrap.ipxe
address=/api.okd-sno.lab.home/192.168.222.30
address=/api-int.okd-sno.lab.home/192.168.222.30
address=/.apps.okd-sno.lab.home/192.168.222.30
address=/assisted.com/192.168.222.200
#!/bin/sh
# install_metallb_ingress.sh: install MetalLB and ingress-nginx, then apply the MetalLB address pool
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml
kubectl wait --for=condition=available deployment/controller --timeout=300s -n metallb-system
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace
kubectl wait --for=condition=available deployment/ingress-nginx-controller --timeout=300s -n ingress-nginx
kubectl apply -f metallb-config.yaml
kubectl patch configmap/ingress-nginx-controller -n ingress-nginx --type merge -p '{"data":{"worker-processes":"1"}}'
# bmh for k8s VM (k8s-bmh.yaml)
apiVersion: v1
kind: Secret
metadata:
  name: bmh-vm-secret
type: Opaque
data:
  username: YWRtaW4=
  password: cGFzc3dvcmQ=
---
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
metadata:
  name: k8s
spec:
  online: true
  bootMACAddress: 00:60:2f:31:81:aa
  bootMode: legacy
  bmc:
    address: redfish-virtualmedia+http://192.168.222.1:8000/redfish/v1/Systems/${DOMID}
    credentialsName: bmh-vm-secret
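# ${DOMID} above is a placeholder for the ID of the VM's system as exposed by the Redfish
# endpoint on 192.168.222.1:8000 (assumed to be sushy-tools); the available system IDs can
# be listed with:
# curl -s http://192.168.222.1:8000/redfish/v1/Systems/ | jq '.Members'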
# k8s-cluster.yaml: CAPI/Metal3 resources for the k8s cluster (my-cluster)
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: my-cluster
  namespace: default
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 172.200.0.0/18
    services:
      cidrBlocks:
        - 10.96.0.0/12
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: my-cluster
    namespace: default
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: Metal3Cluster
    name: my-cluster
    namespace: default
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3Cluster
metadata:
  name: my-cluster
  namespace: default
spec:
  controlPlaneEndpoint:
    host: 192.168.222.100
    port: 6443
  noCloudProvider: true
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: my-cluster
  namespace: default
spec:
  kubeadmConfigSpec:
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: metal3.io/uuid={{ ds.meta_data.uuid }}
        name: '{{ ds.meta_data.name }}'
    joinConfiguration:
      controlPlane: {}
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: metal3.io/uuid={{ ds.meta_data.uuid }}
        name: '{{ ds.meta_data.name }}'
    files:
      - path: /etc/crio/crio.conf.d/90-crun.conf
        owner: root:root
        permissions: "0644"
        content: |
          [crio.image]
          insecure_registries = [
            "192.168.222.1:5000"
          ]
          [crio.image.registry]
          [[crio.image.registry.mirrors]]
          location = "docker.io"
          mirrors = ["192.168.222.1:5000"]
    users:
      - name: user
        passwd: "$6$0nxY.oOoNOLuhcGl$7KCN0P0.izj8shtXqcioT4B/dVZwxwYxWp1uIESflgwNNDRItLtnO/B9XZdh6sB.pgp5J6tWJNWO27Qwzpp791"
        sudo: ALL=(ALL) NOPASSWD:ALL
        sshAuthorizedKeys:
          - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDx+Fn9xo8Kzl6SudvU1u47rsFvs+C5RcacH7hchMN/Isaw1LfMfqS1FMFLWhfbTUiI5+DLUCAvUANwhmar5lXQQvd9kIbKp76hhsl/NzKeTfpUqOzb1Abr2HbAOQKzv56apNqXZwwzQaIgoj7ZT73D5y+B46lKqvvPV+sS022pI7bwJu5UR9QttlMia7+p/EjfqtgS/4fZsVOmEd6iqUZpMazaUhvzYR5MVrKbM0sumZcZlVda9KpCrLG8scPusozKjexJTuU/UwjYI+SQOqZGc8T9t9DfBPX2dWjUgpLAvBu2X+IJ+cW9ck+aNHw2ioJKsvo3/t2YHpzuK6sLmVmcBQtP7qfwetJ5iN03rm9JVAUrGFapPhM/Ysjf8FhmsI3CLjk3dRqAEwlcKyvbOS/PipPGwhuv9JLes13k8jpwWXvr+HeVaMbgmnDm4405KgI8JYXR8xUA205rIeDUp60bCRnxFQ8RHAUGY0k9Zoe17FQtyKkQZfwiPNKvnrANxBc= root@xr12-55
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: Metal3MachineTemplate
      name: my-cluster-controlplane
      namespace: default
    nodeDrainTimeout: 0s
  replicas: 1
  rolloutStrategy:
    rollingUpdate:
      maxSurge: 1
    type: RollingUpdate
  version: v1.29.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3MachineTemplate
metadata:
  name: my-cluster-controlplane
  namespace: default
spec:
  nodeReuse: false
  template:
    spec:
      automatedCleaningMode: metadata
      dataTemplate:
        name: my-cluster-controlplane-template
      image:
        checksum: ab54897a1bcae83581512cdeeda787f009846cfd7a63b298e472c1bd6c522d23
        checksumType: sha256
        format: qcow2
        url: http://192.168.222.1:11111/CENTOS_9_NODE_IMAGE_K8S_v1.29.0.qcow2
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: my-cluster
    nodepool: nodepool-0
  name: my-cluster
  namespace: default
spec:
  clusterName: my-cluster
  replicas: 0
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: my-cluster
      nodepool: nodepool-0
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: my-cluster
        nodepool: nodepool-0
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: my-cluster-workers
      clusterName: my-cluster
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: Metal3MachineTemplate
        name: my-cluster-workers
      nodeDrainTimeout: 0s
      version: v1.29.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3MachineTemplate
metadata:
  name: my-cluster-workers
  namespace: default
spec:
  nodeReuse: false
  template:
    spec:
      automatedCleaningMode: metadata
      dataTemplate:
        name: my-cluster-workers-template
      image:
        checksum: ab54897a1bcae83581512cdeeda787f009846cfd7a63b298e472c1bd6c522d23
        checksumType: sha256
        format: qcow2
        url: http://192.168.222.1:11111/CENTOS_9_NODE_IMAGE_K8S_v1.29.0.qcow2
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: my-cluster-workers
  namespace: default
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            node-labels: metal3.io/uuid={{ ds.meta_data.uuid }}
          name: '{{ ds.meta_data.name }}'
      files:
        - path: /etc/crio/crio.conf.d/90-crun.conf
          owner: root:root
          permissions: "0644"
          content: |
            [crio.image]
            insecure_registries = [
              "192.168.222.1:5000"
            ]
            [crio.image.registry]
            [[crio.image.registry.mirrors]]
            location = "docker.io"
            mirrors = ["192.168.222.1:5000"]
      users:
        - name: user
          passwd: "$6$0nxY.oOoNOLuhcGl$7KCN0P0.izj8shtXqcioT4B/dVZwxwYxWp1uIESflgwNNDRItLtnO/B9XZdh6sB.pgp5J6tWJNWO27Qwzpp791"
          sudo: ALL=(ALL) NOPASSWD:ALL
          sshAuthorizedKeys:
            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDx+Fn9xo8Kzl6SudvU1u47rsFvs+C5RcacH7hchMN/Isaw1LfMfqS1FMFLWhfbTUiI5+DLUCAvUANwhmar5lXQQvd9kIbKp76hhsl/NzKeTfpUqOzb1Abr2HbAOQKzv56apNqXZwwzQaIgoj7ZT73D5y+B46lKqvvPV+sS022pI7bwJu5UR9QttlMia7+p/EjfqtgS/4fZsVOmEd6iqUZpMazaUhvzYR5MVrKbM0sumZcZlVda9KpCrLG8scPusozKjexJTuU/UwjYI+SQOqZGc8T9t9DfBPX2dWjUgpLAvBu2X+IJ+cW9ck+aNHw2ioJKsvo3/t2YHpzuK6sLmVmcBQtP7qfwetJ5iN03rm9JVAUrGFapPhM/Ysjf8FhmsI3CLjk3dRqAEwlcKyvbOS/PipPGwhuv9JLes13k8jpwWXvr+HeVaMbgmnDm4405KgI8JYXR8xUA205rIeDUp60bCRnxFQ8RHAUGY0k9Zoe17FQtyKkQZfwiPNKvnrANxBc= root@xr12-55
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3DataTemplate
metadata:
  name: my-cluster-controlplane-template
  namespace: default
spec:
  clusterName: my-cluster
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3DataTemplate
metadata:
  name: my-cluster-workers-template
  namespace: default
spec:
  clusterName: my-cluster
# local copy of the local-path-provisioner manifest (helper pod image pulled from the local registry at 192.168.222.1:5000)
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims", "configmaps", "pods", "pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: rancher/local-path-provisioner:master-head
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - key: node.kubernetes.io/disk-pressure
          operator: Exists
          effect: NoSchedule
      containers:
        - name: helper-pod
          image: 192.168.222.1:5000/library/busybox
          imagePullPolicy: IfNotPresent
# metallb-config.yaml: MetalLB address pool and L2 advertisement
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.222.200-192.168.222.200
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: example
  namespace: metallb-system
# test-sno.yaml: CAPI resources for the OCP SNO cluster (okd-sno)
apiVersion: v1
kind: Namespace
metadata:
  name: test-capi
---
apiVersion: v1
data:
  .dockerconfigjson: [pull secret in base64]
kind: Secret
metadata:
  name: pull-secret
  namespace: test-capi
type: kubernetes.io/dockerconfigjson
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: okd-sno
  namespace: test-capi
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 172.18.0.0/21
    services:
      cidrBlocks:
        - 10.96.0.0/12
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: AgentControlPlane
    name: okd-sno
    namespace: test-capi
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: Metal3Cluster
    name: okd-sno
    namespace: test-capi
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3Cluster
metadata:
  name: okd-sno
  namespace: test-capi
spec:
  controlPlaneEndpoint:
    host: apiendpointhost
    port: 9999
  noCloudProvider: true
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: AgentControlPlane
metadata:
  name: okd-sno
  namespace: test-capi
spec:
  agentBootstrapConfigSpec:
    pullSecretRef:
      name: "pull-secret"
    sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDx+Fn9xo8Kzl6SudvU1u47rsFvs+C5RcacH7hchMN/Isaw1LfMfqS1FMFLWhfbTUiI5+DLUCAvUANwhmar5lXQQvd9kIbKp76hhsl/NzKeTfpUqOzb1Abr2HbAOQKzv56apNqXZwwzQaIgoj7ZT73D5y+B46lKqvvPV+sS022pI7bwJu5UR9QttlMia7+p/EjfqtgS/4fZsVOmEd6iqUZpMazaUhvzYR5MVrKbM0sumZcZlVda9KpCrLG8scPusozKjexJTuU/UwjYI+SQOqZGc8T9t9DfBPX2dWjUgpLAvBu2X+IJ+cW9ck+aNHw2ioJKsvo3/t2YHpzuK6sLmVmcBQtP7qfwetJ5iN03rm9JVAUrGFapPhM/Ysjf8FhmsI3CLjk3dRqAEwlcKyvbOS/PipPGwhuv9JLes13k8jpwWXvr+HeVaMbgmnDm4405KgI8JYXR8xUA205rIeDUp60bCRnxFQ8RHAUGY0k9Zoe17FQtyKkQZfwiPNKvnrANxBc= root@xr12-55
  agentConfigSpec:
    releaseImage: quay.io/openshift-release-dev/ocp-release:4.14.0-x86_64
    baseDomain: lab.home
    pullSecretRef:
      name: "pull-secret"
    sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDx+Fn9xo8Kzl6SudvU1u47rsFvs+C5RcacH7hchMN/Isaw1LfMfqS1FMFLWhfbTUiI5+DLUCAvUANwhmar5lXQQvd9kIbKp76hhsl/NzKeTfpUqOzb1Abr2HbAOQKzv56apNqXZwwzQaIgoj7ZT73D5y+B46lKqvvPV+sS022pI7bwJu5UR9QttlMia7+p/EjfqtgS/4fZsVOmEd6iqUZpMazaUhvzYR5MVrKbM0sumZcZlVda9KpCrLG8scPusozKjexJTuU/UwjYI+SQOqZGc8T9t9DfBPX2dWjUgpLAvBu2X+IJ+cW9ck+aNHw2ioJKsvo3/t2YHpzuK6sLmVmcBQtP7qfwetJ5iN03rm9JVAUrGFapPhM/Ysjf8FhmsI3CLjk3dRqAEwlcKyvbOS/PipPGwhuv9JLes13k8jpwWXvr+HeVaMbgmnDm4405KgI8JYXR8xUA205rIeDUp60bCRnxFQ8RHAUGY0k9Zoe17FQtyKkQZfwiPNKvnrANxBc= root@xr12-55
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: Metal3MachineTemplate
      name: okd-sno-controlplane
      namespace: test-capi
  replicas: 1
  version: v4.14.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3MachineTemplate
metadata:
  name: okd-sno-controlplane
  namespace: test-capi
spec:
  nodeReuse: false
  template:
    spec:
      automatedCleaningMode: disabled
      dataTemplate:
        name: okd-sno-controlplane-template
      image:
        checksum: null
        checksumType: null
        format: live-iso
        url: https://abcde
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3MachineTemplate
metadata:
  name: okd-sno-workers-2
  namespace: test-capi
spec:
  nodeReuse: false
  template:
    spec:
      automatedCleaningMode: metadata
      dataTemplate:
        name: okd-sno-workers-template
      image:
        checksum: null
        checksumType: null
        format: live-iso
        url: https://abcde
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3DataTemplate
metadata:
  name: okd-sno-controlplane-template
  namespace: test-capi
spec:
  clusterName: okd-sno
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: Metal3DataTemplate
metadata:
  name: okd-sno-workers-template
  namespace: test-capi
spec:
  clusterName: okd-sno