Skip to content

Instantly share code, notes, and snippets.

@djjudas21
Created March 2, 2023 12:19
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save djjudas21/17a53e5a395ebb55171486e7abb8ac48 to your computer and use it in GitHub Desktop.
Config for deploying OpenEBS cStor on MicroK8s: a CStorPoolCluster manifest (cspc.yaml), an install script, a StorageClass (sc.yaml), and Helm chart values (values.yaml).
---
# cspc.yaml
# CStorPoolCluster: provisions one striped cStor pool per node, each backed
# by a single block device discovered by NDM on that node.
apiVersion: cstor.openebs.io/v1
kind: CStorPoolCluster
metadata:
  name: cstor-disk-pool
  namespace: openebs
spec:
  pools:
    - nodeSelector:
        kubernetes.io/hostname: kube05
      dataRaidGroups:
        - blockDevices:
            - blockDeviceName: blockdevice-354a52155915d78c7dbb65c45dea97dd
      poolConfig:
        dataRaidGroupType: "stripe"
    - nodeSelector:
        kubernetes.io/hostname: kube06
      dataRaidGroups:
        - blockDevices:
            - blockDeviceName: blockdevice-f92d579e5fa4c64cc4c912bb5e885bdc
      poolConfig:
        dataRaidGroupType: "stripe"
    - nodeSelector:
        kubernetes.io/hostname: kube07
      dataRaidGroups:
        - blockDevices:
            - blockDeviceName: blockdevice-e10c781b95652e604ccdfeb4a31bb6a1
      poolConfig:
        dataRaidGroupType: "stripe"
    - nodeSelector:
        kubernetes.io/hostname: kube08
      dataRaidGroups:
        - blockDevices:
            - blockDeviceName: blockdevice-81afde47e914d3f235e895462ca7f152
      poolConfig:
        dataRaidGroupType: "stripe"
#!/bin/sh
# Install/upgrade the OpenEBS chart, then apply the cStor pool cluster and
# the StorageClass. Fail fast if any step errors (set -eu) so that the
# manifests are not applied against a broken chart install.
set -eu

helm upgrade -i --create-namespace \
  -n openebs openebs \
  -f values.yaml \
  openebs/openebs

kubectl apply -f cspc.yaml
kubectl apply -f sc.yaml
---
# sc.yaml
# StorageClass backed by the cStor CSI driver; volumes are carved out of the
# "cstor-disk-pool" CSPC with 3-way replication.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cstor
provisioner: cstor.csi.openebs.io
allowVolumeExpansion: true
parameters:
  cas-type: cstor
  # cstorPoolCluster should have the name of the CSPC
  cstorPoolCluster: cstor-disk-pool
  # replicaCount should be <= no. of CSPI created in the selected CSPC
  replicaCount: "3"
# values.yaml — OpenEBS umbrella chart configuration.
# Legacy (non-CSI) maya-apiserver settings.
apiserver:
  enabled: true
  replicas: 1
  ports:
    externalPort: 5656
    internalPort: 5656
  sparse:
    enabled: "false"
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
  ## apiserver resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources: {}
  # limits:
  #   cpu: 1000m
  #   memory: 2Gi
  # requests:
  #   cpu: 500m
  #   memory: 1Gi
defaultStorageConfig:
  enabled: "true"

# Directory used by the OpenEBS to store debug information and so forth
# that are generated in the course of running OpenEBS containers.
varDirectoryPath:
  # --set varDirectoryPath.baseDir="$SNAP_COMMON/var/openebs"
  baseDir: "/nvme/openebs"
# Legacy (non-CSI) openebs-provisioner settings.
provisioner:
  enabled: true
  replicas: 1
  enableLeaderElection: true
  patchJivaNodeAffinity: enabled
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
  ## provisioner resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources: {}
  # limits:
  #   cpu: 1000m
  #   memory: 2Gi
  # requests:
  #   cpu: 1m
  #   memory: 16Mi
# If you want to enable local pv as a dependency chart then set
# `localprovisioner.enabled: false` and enable it as dependency chart.
# If you are using custom configuration then update those configuration
# under `localpv-provisioner` key.
localprovisioner:
  enabled: false
  replicas: 1
  enableLeaderElection: true
  # These fields are deprecated. Please use the fields (see below)
  # - deviceClass.enabled
  # - hostpathClass.enabled
  enableDeviceClass: true
  enableHostpathClass: true
  # This sets default directory used by the provisioner to provision
  # hostpath volumes.
  # --set localprovisioner.basePath="$SNAP_COMMON/var/openebs/local" \
  basePath: "/nvme/openebs/local"
  # This sets the number of times the provisioner should try
  # with a polling interval of 5 seconds, to get the Blockdevice
  # Name from a BlockDeviceClaim, before the BlockDeviceClaim
  # is deleted. E.g. 12 * 5 seconds = 60 seconds timeout
  waitForBDBindTimeoutRetryCount: "12"
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
  ## localprovisioner resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources:
    # limits:
    #   cpu: 1000m
    #   memory: 2Gi
    requests:
      cpu: 1m
      memory: 16Mi
  deviceClass:
    # Name of default device StorageClass.
    name: openebs-device
    # If true, enables creation of the openebs-device StorageClass
    enabled: true
    # Available reclaim policies: Delete/Retain, defaults: Delete.
    reclaimPolicy: Delete
    # If true, sets the openebs-device StorageClass as the default StorageClass
    isDefaultClass: false
    # Custom node affinity label(s) for example "openebs.io/node-affinity-value"
    # that will be used instead of hostnames
    # This helps in cases where the hostname changes when the node is removed and
    # added back with the disks still intact.
    # Example:
    # nodeAffinityLabels:
    #   - "openebs.io/node-affinity-key-1"
    #   - "openebs.io/node-affinity-key-2"
    nodeAffinityLabels: []
    # Sets the filesystem to be written to the blockdevice before
    # mounting (filesystem volumes)
    # This is only usable if the selected BlockDevice does not already
    # have a filesystem
    # Valid values: "ext4", "xfs"
    fsType: "ext4"
    # Label block devices in the cluster that you would like the openEBS localPV
    # Provisioner to pick up those specific block devices available on the node.
    # Set the label key and value as shown in the example below.
    #
    # To read more: https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/tutorials/device/blockdevicetag.md
    #
    # Example:
    # blockDeviceSelectors:
    #   ndm.io/driveType: "SSD"
    #   ndm.io/fsType: "none"
    blockDeviceSelectors: {}
  hostpathClass:
    # Name of the default hostpath StorageClass
    name: openebs-hostpath
    # If true, enables creation of the openebs-hostpath StorageClass
    enabled: true
    # Available reclaim policies: Delete/Retain, defaults: Delete.
    reclaimPolicy: Delete
    # If true, sets the openebs-hostpath StorageClass as the default StorageClass
    isDefaultClass: false
    # Path on the host where local volumes of this storage class are mounted under.
    # NOTE: If not specified, this defaults to the value of localprovisioner.basePath.
    basePath: ""
    # Custom node affinity label(s) for example "openebs.io/node-affinity-value"
    # that will be used instead of hostnames
    # This helps in cases where the hostname changes when the node is removed and
    # added back with the disks still intact.
    # Example:
    # nodeAffinityLabels:
    #   - "openebs.io/node-affinity-key-1"
    #   - "openebs.io/node-affinity-key-2"
    nodeAffinityLabels: []
    # Prerequisite: XFS Quota requires an XFS filesystem mounted with
    # the 'pquota' or 'prjquota' mount option.
    xfsQuota:
      # If true, enables XFS project quota
      enabled: false
      # Detailed configuration options for XFS project quota.
      # If XFS Quota is enabled with the default values, the usage limit
      # is set at the storage capacity specified in the PVC.
      softLimitGrace: "0%"
      hardLimitGrace: "0%"
    # Prerequisite: EXT4 Quota requires an EXT4 filesystem mounted with
    # the 'prjquota' mount option.
    ext4Quota:
      # If true, enables EXT4 project quota
      enabled: false
      # Detailed configuration options for EXT4 project quota.
      # If EXT4 Quota is enabled with the default values, the usage limit
      # is set at the storage capacity specified in the PVC.
      softLimitGrace: "0%"
      hardLimitGrace: "0%"
snapshotOperator:
  enabled: true
  controller:
    ## snapshot controller resource requests and limits
    ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
    resources: {}
    # limits:
    #   cpu: 1000m
    #   memory: 2Gi
    # requests:
    #   cpu: 500m
    #   memory: 1Gi
  provisioner:
    ## snapshot provisioner resource requests and limits
    ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
    resources:
      # limits:
      #   cpu: 1000m
      #   memory: 2Gi
      requests:
        cpu: 1m
        memory: 20Mi
  replicas: 1
  enableLeaderElection: true
  upgradeStrategy: "Recreate"
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
# If you want to enable openebs as a dependency chart then set `ndm.enabled: false`,
# `ndmOperator.enabled: false` and enable it as dependency chart. If you are using
# custom configuration then update those configuration under `openebs-ndm` key.
ndm:
  enabled: true
  sparse:
    # --set ndm.sparse.path="$SNAP_COMMON/var/openebs/sparse"
    # path: "/nvme/openebs/sparse"
    # size: "10737418240"
    # count: "0"
  filters:
    enableOsDiskExcludeFilter: true
    osDiskExcludePaths: "/,/etc/hosts,/boot"
    enableVendorFilter: true
    excludeVendors: "CLOUDBYT,OpenEBS,TrueNAS"
    enablePathFilter: true
    includePaths: "/dev/sdb"
    excludePaths: "/dev/loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd,/dev/zd"
  probes:
    enableSeachest: false
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
  ## ndm resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources:
    # limits:
    #   cpu: 1000m
    #   memory: 2Gi
    requests:
      cpu: 1m
      memory: 20Mi
# If you want to enable openebs as a dependency chart then set `ndm.enabled: false`,
# `ndmOperator.enabled: false` and enable it as dependency chart. If you are using
# custom configuration then update those configuration under `openebs-ndm` key.
ndmOperator:
  enabled: true
  replicas: 1
  upgradeStrategy: Recreate
  healthCheck:
    initialDelaySeconds: 15
    periodSeconds: 20
  readinessCheck:
    initialDelaySeconds: 5
    periodSeconds: 10
  ## ndmOperator resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources:
    # limits:
    #   cpu: 1000m
    #   memory: 2Gi
    requests:
      cpu: 2m
      memory: 8Mi
ndmExporter:
  enabled: true
  nodeExporter:
    name: ndm-node-exporter
    podLabels:
      name: openebs-ndm-node-exporter
    # The TCP port number used for exposing ndm-node-exporter metrics.
    # If not set, service will not be created to expose metrics endpoint to serviceMonitor
    # and listen-port flag will not be set and container port will be empty.
    metricsPort: 9101
  clusterExporter:
    name: ndm-cluster-exporter
    podLabels:
      name: openebs-ndm-cluster-exporter
    # The TCP port number used for exposing ndm-cluster-exporter metrics.
    # If not set, service will not be created to expose metrics endpoint to serviceMonitor
    # and listen-port flag will not be set and container port will be empty.
    metricsPort: 9100
webhook:
  enabled: true
  failurePolicy: "Fail"
  replicas: 1
  healthCheck:
    initialDelaySeconds: 30
    periodSeconds: 60
  hostNetwork: false
  ## admission-server resource requests and limits
  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
  resources: {}
  # limits:
  #   cpu: 500m
  #   memory: 1Gi
  # requests:
  #   cpu: 250m
  #   memory: 500Mi
# These are ndm related configuration. If you want to enable openebs as a dependency
# chart then set `ndm.enabled: false`, `ndmOperator.enabled: false` and enable it as
# dependency chart. If you are using custom configuration then update those configuration
# under `openebs-ndm` key.
featureGates:
  enabled: true
  GPTBasedUUID:
    enabled: true
    featureGateFlag: "GPTBasedUUID"
  APIService:
    enabled: false
    featureGateFlag: "APIService"
    address: "0.0.0.0:9115"
  UseOSDisk:
    enabled: false
    featureGateFlag: "UseOSDisk"
  ChangeDetection:
    enabled: false
    featureGateFlag: "ChangeDetection"
  PartitionTableUUID:
    enabled: false
    featureGateFlag: "PartitionTableUUID"
crd:
  enableInstall: true
analytics:
  enabled: true
  # Specify in hours the duration after which a ping event needs to be sent.
  pingInterval: "24h"
jiva:
  # --set jiva.csiNode.kubeletDir="$SNAP_COMMON/var/lib/kubelet/"
  csiNode:
    kubeletDir: "/var/snap/microk8s/common/var/lib/kubelet/"
  # non csi configuration
  replicas: 3
  defaultStoragePath: "/var/openebs"
  # jiva csi driver configuration
  # do not enable or configure any sub dependency here
  # only jiva csi related settings can be added here
  # ref - https://openebs.github.io/jiva-operator
  # jiva chart dependency tree is here -
  # jiva
  # | - localpv-provisioner
  # | | - openebs-ndm
  # Enable localpv-provisioner and openebs-ndm as root dependency not as
  # sub dependency.
  # openebs
  # | - jiva(enable)
  # | | - localpv-provisioner(disable)
  # | | | - openebs-ndm(disable)
  # | - localpv-provisioner(enable)
  # | - openebs-ndm(enable)
  # --set jiva.enabled=true
  enabled: false
  openebsLocalpv:
    enabled: false
  localpv-provisioner:
    openebsNDM:
      enabled: false
cstor:
  # --set cstor.csiNode.kubeletDir="$SNAP_COMMON/var/lib/kubelet/"
  # MicroK8s keeps the kubelet root under the snap's common directory.
  csiNode:
    kubeletDir: "/var/snap/microk8s/common/var/lib/kubelet/"
  # cstor csi driver configuration
  # do not enable or configure any sub dependency here
  # only cstor csi related settings can be added here
  # ref - https://openebs.github.io/cstor-operators
  # cstor chart dependency tree is here -
  # cstor
  # | - openebs-ndm
  # Enable openebs-ndm as root dependency not as sub dependency.
  # openebs
  # | - cstor(enable)
  # | | - openebs-ndm(disable)
  # | - openebs-ndm(enable)
  # --set cstor.enabled=true \
  enabled: true
  openebsNDM:
    enabled: false
  # Sample configuration if you want to configure cstor csi driver with custom values.
  # This is a small part of the full configuration. Full configuration available
  # here - https://openebs.github.io/cstor-operators
  cspcOperator:
    poolManager:
      image:
        registry: quay.io/
        repository: openebs/cstor-pool-manager
        tag: 3.4.0
    cstorPool:
      image:
        registry: quay.io/
        repository: openebs/cstor-pool
        tag: 3.4.0
    cstorPoolExporter:
      image:
        registry: quay.io/
        repository: openebs/m-exporter
        tag: 3.4.0
    image:
      registry: quay.io/
      repository: openebs/cspc-operator
      pullPolicy: IfNotPresent
      tag: 3.4.0
  cvcOperator:
    target:
      image:
        registry: quay.io/
        repository: openebs/cstor-istgt
        tag: 3.4.0
    volumeMgmt:
      image:
        registry: quay.io/
        repository: openebs/cstor-volume-manager
        tag: 3.4.0
    volumeExporter:
      image:
        registry: quay.io/
        repository: openebs/m-exporter
        tag: 3.4.0
    image:
      registry: quay.io/
      repository: openebs/cvc-operator
      pullPolicy: IfNotPresent
      tag: 3.4.0
  cstorCSIPlugin:
    image:
      registry: quay.io/
      repository: openebs/cstor-csi-driver
      pullPolicy: IfNotPresent
      tag: 3.4.0
  admissionServer:
    componentName: cstor-admission-webhook
    image:
      registry: quay.io/
      repository: openebs/cstor-webhook
      pullPolicy: IfNotPresent
      tag: 3.4.0
# ndm configuration goes here
# https://openebs.github.io/node-disk-manager
openebs-ndm:
  enabled: false
# Sample configuration if you want to configure openebs ndm with custom values.
# This is a small part of the full configuration. Full configuration available
# here - https://openebs.github.io/node-disk-manager
# ndm:
#   sparse:
#     path: "/var/openebs/sparse"
#     size: "10737418240"
#     count: "0"
#   filters:
#     enableOsDiskExcludeFilter: true
#     osDiskExcludePaths: "/,/etc/hosts,/boot"
#     enableVendorFilter: true
#     excludeVendors: "CLOUDBYT,OpenEBS"
#     enablePathFilter: true
#     includePaths: ""
#     excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd,/dev/zd"
#   probes:
#     enableSeachest: false
#     enableUdevProbe: true
#     enableSmartProbe: true
#
#   featureGates:
#     enabled: true
#     GPTBasedUUID:
#       enabled: true
#       featureGateFlag: "GPTBasedUUID"
#     APIService:
#       enabled: false
#       featureGateFlag: "APIService"
#       address: "0.0.0.0:9115"
#     UseOSDisk:
#       enabled: false
#       featureGateFlag: "UseOSDisk"
#     ChangeDetection:
#       enabled: false
#       featureGateFlag: "ChangeDetection"
#
#   varDirectoryPath:
#     baseDir: "/var/openebs"
# local pv provisioner configuration goes here
# do not enable or configure any sub dependency here
# ref - https://openebs.github.io/dynamic-localpv-provisioner
# local pv chart dependency tree is here -
# localpv-provisioner
# | - openebs-ndm
# Enable openebs-ndm as root dependency not as sub dependency.
# openebs
# | - localpv-provisioner(enable)
# | | - openebs-ndm(disable)
# | - openebs-ndm(enable)
localpv-provisioner:
  enabled: false
  openebsNDM:
    enabled: false
# Sample configuration if you want to configure openebs localpv with custom values.
# This is a small part of the full configuration. Full configuration available
# here - https://openebs.github.io/dynamic-localpv-provisioner
# localpv:
#   healthCheck:
#     initialDelaySeconds: 30
#     periodSeconds: 60
#   replicas: 1
#   enableLeaderElection: true
#   basePath: "/var/openebs/local"
# lvm local pv configuration goes here
# ref - https://openebs.github.io/lvm-localpv
lvm-localpv:
  enabled: false
# Sample configuration if you want to configure lvm localpv with custom values.
# This is a small part of the full configuration. Full configuration available
# here - https://openebs.github.io/lvm-localpv
# Image used by the pre-delete cleanup hook.
cleanup:
  image:
    # Make sure that the registry name ends with a '/'.
    # For example : quay.io/ is a correct value here and quay.io is incorrect
    registry:
    repository: bitnami/kubectl
    tag:
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment