@brettmilford
Created October 28, 2020 05:59
Single node microk8s rook-ceph + juju-lxd openstack
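A rough end-to-end sketch of how the pieces below fit together, assuming the files in this gist are split out under the (hypothetical) names used here:

# 1. Single-node Ceph via rook on microk8s
./rook.sh deploy
# 2. Initialise LXD with the preseed and profiles, then bootstrap Juju on it
cat lxd-preseed.yaml | lxd init --preseed
juju bootstrap localhost lxd-localhost
# 3. Deploy the OpenStack bundle below and point ceph-proxy at the rook cluster
juju deploy ./openstack-bundle.yaml
./configure-ceph-proxy.sh
# 4. Create networks, flavors and quotas, then register the overcloud as a Juju cloud
./configure-openstack.sh
./add-openstack-cloud.sh uc lxd-localhost:default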
# NOTE: the focal series distro origin is ussuri
series: focal
variables:
openstack-origin: &openstack-origin cloud:focal-victoria
source: &source cloud:focal-victoria
debug: &debug true
verbose: &verbose true
loglevel: &loglevel 5
ssl_ca: &ssl_ca
ssl_cert: &ssl_cert
ssl_key: &ssl_key
services:
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
worker-multiplier: 0.1
openstack-origin: *openstack-origin
ceph-osd-replication-count: 1
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
worker-multiplier: 0.1
openstack-origin: *openstack-origin
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
tuning-level: fast
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
worker-multiplier: 0.1
neutron-security-groups: true
overlay-network-type: "gre vxlan"
openstack-origin: *openstack-origin
flat-network-providers: physnet1
global-physnet-mtu: 9000
enable-ml2-port-security: true
# TODO: Deploy OVN chassis
manage-neutron-plugin-legacy-mode: true
neutron-api-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
bridge-mappings: physnet1:br-ex
data-port: br-ex:eth1
worker-multiplier: 0.1
openstack-origin: *openstack-origin
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
num_units: 0
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
worker-multiplier: 0.1
network-manager: Neutron
openstack-origin: *openstack-origin
ram-allocation-ratio: '64'
cpu-allocation-ratio: '64'
disk-allocation-ratio: '64' # should ultimately be limited by lxd-pool size
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
worker-multiplier: 0.1
openstack-origin: *openstack-origin
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
# NOTE: 'force_raw_images = False' is not allowed with 'images_type = rbd'
#force-raw-images: False # if using zfs backend
resume-guests-state-on-host-boot: true
openstack-origin: *openstack-origin
libvirt-image-backend: rbd
ceph-osd-replication-count: 1
virtio-net-tx-queue-size: 1024
virtio-net-rx-queue-size: 1024
cpu-mode: host-passthrough
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
glance-simplestreams-sync:
charm: cs:~openstack-charmers-next/glance-simplestreams-sync
num_units: 1
options:
mirror_list: |
[{url: 'http://cloud-images.ubuntu.com/releases/',
name_prefix: 'ubuntu:released',
path: 'streams/v1/index.sjson', max: 1,
item_filters: ['release~(trusty|xenial|bionic|focal)',
'arch~(x86_64|amd64)',
'ftype~(disk1.img|disk.img)']}]
use_swift: false
ssl_ca: *ssl_ca
ceph-proxy:
charm: cs:~openstack-charmers-next/ceph-proxy
num_units: 1
options:
source: *source
loglevel: *loglevel
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
worker-multiplier: 0.1
block-device: None
glance-api-version: 2
openstack-origin: *openstack-origin
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
num_units: 0
options:
pool-type: 'replicated'
ceph-osd-replication-count: 1
relations:
- [ mysql:db-router, cinder-mysql-router:db-router ]
- [ mysql:db-router, nova-cloud-controller-mysql-router:db-router ]
- [ mysql:db-router, neutron-api-mysql-router:db-router ]
- [ mysql:db-router, keystone-mysql-router:db-router ]
- [ mysql:db-router, glance-mysql-router:db-router ]
- [ mysql:db-router, placement-mysql-router:db-router ]
- [ cinder-mysql-router:shared-db, cinder:shared-db ]
- [ nova-cloud-controller-mysql-router:shared-db, nova-cloud-controller:shared-db ]
- [ neutron-api-mysql-router:shared-db, neutron-api:shared-db ]
- [ keystone-mysql-router:shared-db, keystone:shared-db ]
- [ glance-mysql-router:shared-db, glance:shared-db ]
- [ placement-mysql-router:shared-db, placement:shared-db ]
- [ nova-compute:ceph-access, cinder-ceph:ceph-access ]
- [ cinder:image-service, glance:image-service]
- [ cinder:amqp, rabbitmq-server:amqp ]
- [ cinder:identity-service, keystone:identity-service ]
- [ cinder:cinder-volume-service, nova-cloud-controller:cinder-volume-service ]
- [ cinder-ceph:storage-backend, cinder:storage-backend ]
- [ ceph-proxy:client, nova-compute:ceph ]
- [ ceph-proxy:client, cinder-ceph:ceph ]
- [ nova-compute:amqp, rabbitmq-server:amqp ]
- [ neutron-gateway:amqp, rabbitmq-server:amqp ]
- [ nova-cloud-controller:identity-service, keystone:identity-service ]
- [ glance:identity-service, keystone:identity-service ]
- [ neutron-api:identity-service, keystone:identity-service ]
- [ neutron-openvswitch:neutron-plugin-api, neutron-api:neutron-plugin-api ]
- [ neutron-api:amqp, rabbitmq-server:amqp ]
- [ neutron-gateway:neutron-plugin-api, neutron-api:neutron-plugin-api ]
- [ glance:amqp, rabbitmq-server:amqp ]
- [ nova-cloud-controller:image-service, glance:image-service ]
- [ nova-compute:image-service, glance:image-service ]
- [ nova-cloud-controller:cloud-compute, nova-compute:cloud-compute ]
- [ nova-cloud-controller:amqp, rabbitmq-server:amqp ]
- [ nova-cloud-controller:quantum-network-service, neutron-gateway:quantum-network-service ]
- [ nova-compute:neutron-plugin, neutron-openvswitch:neutron-plugin ]
- [ neutron-openvswitch:amqp, rabbitmq-server:amqp ]
- [ nova-cloud-controller:neutron-api, neutron-api:neutron-api ]
- [ placement:identity-service, keystone:identity-service ]
- [ placement, nova-cloud-controller ]
- [ glance-simplestreams-sync:identity-service, keystone:identity-service ]
#!/bin/sh -ex
mon_v2_to_v1() {
IP=$(echo $1 | awk -F ':' '{print $2}')
PORT=6789
MON_HOST="$IP:$PORT"
}
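# The awk field positions below assume a ceph.conf/keyring layout like the
# following (hypothetical values):
#   mon host = [v2:10.64.0.10:3300/0,v1:10.64.0.10:6789/0]
#   fsid = 01234567-89ab-cdef-0123-456789abcdef
# and in /etc/ceph/client.admin.keyring:
#   key = AQ...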
mon_v2_to_v1 $(awk '/mon host/{ print $4 }' /etc/ceph/ceph.conf)
FSID=$(awk '/fsid/{ print $3 }' /etc/ceph/ceph.conf)
ADMIN_KEY=$(awk '/key/{ print $3 }' /etc/ceph/client.admin.keyring)
juju config ceph-proxy monitor-hosts="$MON_HOST"
juju config ceph-proxy fsid="$FSID"
juju config ceph-proxy admin-key="$ADMIN_KEY"
#!/bin/sh -xeu
# Depends on:
# https://github.com/openstack-charmers/openstack-on-lxd/blob/master/openrc
# https://github.com/openstack-charmers/openstack-on-lxd/blob/master/neutron-ext-net-ksv3
# https://github.com/openstack-charmers/openstack-on-lxd/blob/master/neutron-tenant-net-ksv3
#
#if [ -n $1 ]; then
_current_juju_controller=`juju show-model --format json | jq -r '.[]["controller-name"]'`
_current_juju_model=`juju show-model --format json | jq -r '.[]["name"]'`
#fi
JUJU_MODEL=${1:-"${_current_juju_controller}:${_current_juju_model}"}
_juju_model_arg=${JUJU_MODEL:+--model=$JUJU_MODEL}
is_ksv3 () {
api_ver="`juju config ${_juju_model_arg:-} keystone preferred-api-version`"
rel="`juju config ${_juju_model_arg:-} keystone openstack-origin| sed -r 's/.+-(.+)/\1/g'`"
rel="`echo -e "$rel\nqueens"| sort| head -n 1`"
series=`juju status ${_juju_model_arg:-} keystone --format=json| jq .machines[].series| tr -d '"'`
if [ "$api_ver" = "3" ] || [ "${rel%%/*}" > "pike" ] || \
{ [ "$series" > "artful" ] && [ "$series" < "trusty" ]; }; then
return 0
else
return 1
fi
}
# Sanitize language settings to avoid commands bailing out
# with "unsupported locale setting" errors.
unset LANG
unset LANGUAGE
LC_ALL=C
export LC_ALL
for i in curl openstack; do
if ! type ${i} >/dev/null 2>&1; then
if [ "${i}" = 'curl' ]; then
echo "Please install ${i} before proceeding"
else
echo "Please install python-${i}client before proceeding"
fi
exit
fi
done
if is_ksv3; then
. ./novarc
else
. ./novarc_ksv2
fi
# Test for credentials set
if [ "${OS_USERNAME}" = "" ]; then
echo "No Keystone credentials specified. Try running source openrc"
exit
fi
echo Configuring neutron.
# EXT_NET_CIDR is the public (external) network through which instances reach the internet.
EXT_NET_CIDR='10.64.0.0/16'
EXT_NET_RANGE='start=10.64.128.1,end=10.64.255.254'
FIP_RANGE='10.64.128.1:10.64.255.254'
EXT_NET_GATEWAY='10.64.0.1'
CIDR_PRIV="192.168.21.0/24"
CIDR_PRIV_ADM="192.168.22.0/24"
NAMESERVER="10.64.0.1"
NS_ARG=""
[ "`juju config ${_juju_model_arg:-} neutron-api enable-ml2-dns`" = "true" ] && unset NAMESERVER || NS_ARG="-N $NAMESERVER"
if [ "$(juju status ${_juju_model_arg:-} ovn-chassis --format=json 2>/dev/null | jq '.machines | length')" -gt 0 ] 2>/dev/null; then
# ovn defaults to geneve
# TODO: Why is this using $1
#net_type=${1:-"geneve"}
net_type=${NET_TYPE:-"geneve"}
else
net_type=${NET_TYPE:-"gre"}
fi
if is_ksv3; then
python3 ./openstack-on-lxd/neutron-ext-net-ksv3 --project admin --network-type flat -g $EXT_NET_GATEWAY -c $EXT_NET_CIDR -f $FIP_RANGE ext_net
python3 ./openstack-on-lxd/neutron-tenant-net-ksv3 --project=admin --network-type=$net_type -r provider-router $NS_ARG int_net $CIDR_PRIV
else
python3 ./openstack-on-lxd/neutron-ext-net --network-type flat -g $EXT_NET_GATEWAY -c $EXT_NET_CIDR -f $FIP_RANGE ext_net
python3 ./openstack-on-lxd/neutron-tenant-net --network-type $net_type -t admin -r provider-router $NS_ARG int_net $CIDR_PRIV
fi
# Sec Group Config
for i in $(openstack security group list | awk '/default/{ print $2 }'); do \
openstack security group rule create $i --protocol icmp --remote-ip 0.0.0.0/0; \
openstack security group rule create $i --protocol tcp --remote-ip 0.0.0.0/0 --dst-port 22; \
if ! juju status ${_juju_model_arg:-} 'heat-*' 2>&1 | grep -q Nothing; then
# Open heat-cfn so it can run on a different host
openstack security group rule create --ingress --ethertype IPv4 \
--protocol tcp --dst-port 8000 $i
openstack security group rule create --ingress --ethertype IPv4 \
--protocol tcp --dst-port 8080 $i
fi
done
if [ -f ~/.ssh/testkey.pub ] || ssh-add -l >/dev/null 2>&1; then
:
else
echo Generating ssh key.
ssh-keygen -t rsa -f ~/.ssh/testkey -N ""
fi
if [ -r ~/.ssh/id_rsa.pub ]; then
echo Configuring nova public key
openstack keypair create --public-key ~/.ssh/id_rsa.pub testkey
fi
# Unlimited quotas for everyone
echo Configuring quotas.
for admin in $(openstack project list | awk '/ admin / {print $2}'); do
openstack quota set --instances -1 $admin
openstack quota set --floating-ips -1 $admin
openstack quota set --cores -1 $admin
openstack quota set --ram 88000 $admin # ~88G; adjust to suit the host
openstack quota set --gigabytes -1 $admin
openstack quota set --volumes -1 $admin
openstack quota set --secgroups -1 $admin
openstack quota set --secgroup-rules -1 $admin
openstack quota set --ports -1 $admin
done
# add default flavors, if they don't already exist
if ! openstack flavor list | grep -q m1.tiny; then
openstack flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny
openstack flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small
#--property hw:mem_page_size=1GB
openstack flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium
#--property hw:mem_page_size=1GB
openstack flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large
#--property hw:mem_page_size=1GB
openstack flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge
#--property hw:mem_page_size=1GB
fi
---
name: default
config:
user.user-data: |
#cloud-config
ssh_import_id: lp:brettmilford
apt:
http_proxy: http://10.64.0.1:8000
write_files:
- path: /etc/sysctl.conf
content: |
net.core.netdev_max_backlog=182757
- path: /etc/udev/rules.d/80-netdev.rules
content: |
SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="*" ATTR{tx_queue_len}="10000"
runcmd:
- sysctl -p
devices:
eth0:
mtu: "9000"
nictype: bridged
parent: lxdbr0
type: nic
root:
path: /
pool: default
type: disk
---
name: juju-default
config:
boot.autostart: "true"
security.nesting: "true" # required for neutron
security.privileged: "true" # required for neutron
linux.kernel_modules: openvswitch,nbd,ip_tables,ip6_tables,vhost_net # required for neutron-gateway, vhost_net required for virtio?
devices:
eth1:
mtu: "9000"
name: eth1
nictype: bridged
parent: lxdbr0
type: nic
mem:
path: /dev/mem
type: unix-char
tun:
path: /dev/net/tun
type: unix-char
kvm:
path: /dev/kvm # not supported/present in multipass (on osx)
type: unix-char
vhost-net:
path: /dev/vhost-net
type: unix-char
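# Usage sketch (assuming Juju's per-model LXD profile naming, "juju-<model>"):
# apply this on the LXD host so the "default" model's containers pick it up, e.g.
#   cat juju-default.yaml | lxc profile edit juju-default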
#!/bin/sh -xeu
CLOUD=${1:-'uc'}
UNDERCLOUD=${2:-'lxd-localhost:default'}
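# Usage sketch (script name hypothetical):
#   sh ./add-openstack-cloud.sh uc lxd-localhost:default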
_juju_model_arg=${UNDERCLOUD:+--model=$UNDERCLOUD}
. ./novarc
KEYSTONE=`openstack endpoint list --interface public -f value -c 'Service Type' -c URL | awk '/identity/{ print $2 }'`
REGION=`openstack region list -f value -c Region`
clouddef=$(mktemp)
cat <<EOF > $clouddef
clouds:
${CLOUD}: # arbitrary
type: openstack
auth-types: [ userpass ]
regions:
${REGION}: # must match nova
endpoint: ${KEYSTONE}
auth-types: [ userpass ]
EOF
juju update-cloud --client $CLOUD -f $clouddef
juju autoload-credentials --client openstack
rm $clouddef
LXDBR_IP=$(lxc network get lxdbr0 ipv4.address | head -c-4)
EXT_NET=$(openstack network list | awk '/ext_net/{ print $2 }')
INT_NET=$(openstack network list | awk '/int_net/{ print $2 }')
ARGS="network=${INT_NET}
external-network=${EXT_NET}
use-floating-ip=true
image-stream=released
default-series=bionic
enable-os-refresh-update=true
enable-os-upgrade=false
logging-config=<root>=DEBUG
apt-http-proxy=http://${LXDBR_IP}:8000"
MODEL_CONFIG=""
MODEL_DEFAULT=""
for ARG in $ARGS
do
MODEL_CONFIG="${MODEL_CONFIG}--config $ARG "
MODEL_DEFAULT="${MODEL_DEFAULT}--model-default $ARG "
done
juju bootstrap $CLOUD --no-gui --debug $MODEL_CONFIG $MODEL_DEFAULT
config: {}
networks:
- config:
# need a larger address range than what "auto" assigns (/24)
ipv4.address: 10.64.0.1/16
ipv4.dhcp.ranges: 10.64.0.2-10.64.128.0
ipv4.nat: true
ipv6.address: none
# default expiry (1h) wedges juju deployments frequently
# 168h = 7 days, else use 'infinite'
ipv4.dhcp.expiry: 168h
description: "Default network for LXD"
name: lxdbr0
type: ""
storage_pools:
- config:
ceph.osd.pool_name: lxd-default
description: "Default ceph storage pool for LXD"
name: default
driver: ceph
profiles:
- config: {}
description: "Default profile for LXD"
devices:
eth0:
name: eth0
nictype: bridged
parent: lxdbr0
type: nic
root:
path: /
pool: default
type: disk
name: default
cluster: null
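# Usage sketch (file name hypothetical): feed this preseed to LXD with
#   cat lxd-preseed.yaml | lxd init --preseed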
#################################################################################################################
# Modified from https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster-test.yaml
#################################################################################################################
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-config-override
namespace: rook-ceph
data:
config: |
[global]
osd_pool_default_size = 1
mon_warn_on_pool_no_redundancy = false
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
dataDirHostPath: /var/lib/rook
cephVersion:
image: ceph/ceph:v15
allowUnsupported: true
mon:
count: 1
allowMultiplePerNode: true
dashboard:
enabled: false
network:
hostNetwork: true
crashCollector:
disable: true
storage:
useAllNodes: true
useAllDevices: false
# Note: update this
devicePathFilter: ^/dev/disk/by-id/ata-SPCC_Solid_State_Disk_.*
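# A hedged way to pick the filter: list /dev/disk/by-id/ on the host and match
# the model/serial of the disk(s) intended for OSDs, e.g. `ls -l /dev/disk/by-id/`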
# config:
# metadataDevice: "sde"
#################################################################################################################
# Modified from https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml
###############################################################################################################
# Rook Ceph Operator Config ConfigMap
# Use this ConfigMap to override Rook-Ceph Operator configurations.
# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
# Operator Deployment.
# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-ceph-operator-config
# should be in the namespace of the operator
namespace: rook-ceph
data:
# Enable the CSI driver.
# To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
ROOK_CSI_ENABLE_CEPHFS: "true"
# Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
ROOK_CSI_ENABLE_RBD: "true"
ROOK_CSI_ENABLE_GRPC_METRICS: "true"
# Set logging level for csi containers.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
# CSI_LOG_LEVEL: "0"
# Enable cephfs kernel driver instead of ceph-fuse.
# If you disable the kernel client, your application may be disrupted during upgrade.
# See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html
# NOTE! cephfs quota is not supported in kernel version < 4.17
CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
# (Optional) Allow starting unsupported ceph-csi image
ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
# ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.0.0"
# ROOK_CSI_REGISTRAR_IMAGE: "quay.io/k8scsi/csi-node-driver-registrar:v1.2.0"
# ROOK_CSI_RESIZER_IMAGE: "quay.io/k8scsi/csi-resizer:v0.4.0"
# ROOK_CSI_PROVISIONER_IMAGE: "quay.io/k8scsi/csi-provisioner:v1.6.0"
# ROOK_CSI_SNAPSHOTTER_IMAGE: "quay.io/k8scsi/csi-snapshotter:v2.1.1"
# ROOK_CSI_ATTACHER_IMAGE: "quay.io/k8scsi/csi-attacher:v2.1.0"
# (Optional) set user created priorityclassName for csi plugin pods.
# CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
# (Optional) set user created priorityclassName for csi provisioner pods.
# CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
# CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
# CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
# CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
# CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
# kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
ROOK_CSI_KUBELET_DIR_PATH: "/var/snap/microk8s/common/var/lib/kubelet"
# (Optional) Ceph Provisioner NodeAffinity.
# CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_PROVISIONER_TOLERATIONS: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/controlplane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) Ceph CSI plugin NodeAffinity.
# CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# CSI_PLUGIN_TOLERATIONS: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/controlplane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource
# requests and limits you want to apply for provisioner pod
# CSI_RBD_PROVISIONER_RESOURCE: |
# - name : csi-provisioner
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-resizer
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-attacher
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-snapshotter
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-rbdplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource
# requests and limits you want to apply for plugin pod
# CSI_RBD_PLUGIN_RESOURCE: |
# - name : driver-registrar
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# - name : csi-rbdplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
# requests and limits you want to apply for provisioner pod
# CSI_CEPHFS_PROVISIONER_RESOURCE: |
# - name : csi-provisioner
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-resizer
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-attacher
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-cephfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource
# requests and limits you want to apply for plugin pod
# CSI_CEPHFS_PLUGIN_RESOURCE: |
# - name : driver-registrar
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# - name : csi-cephfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# Configure CSI CephFS grpc and liveness metrics port
# CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
# CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
# Configure CSI RBD grpc and liveness metrics port
# CSI_RBD_GRPC_METRICS_PORT: "9090"
# CSI_RBD_LIVENESS_METRICS_PORT: "9080"
# Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-operator
namespace: rook-ceph
labels:
operator: rook
storage-backend: ceph
spec:
selector:
matchLabels:
app: rook-ceph-operator
replicas: 1
template:
metadata:
labels:
app: rook-ceph-operator
spec:
serviceAccountName: rook-ceph-system
containers:
- name: rook-ceph-operator
image: rook/ceph:v1.4.0
args: ["ceph", "operator"]
volumeMounts:
- mountPath: /var/lib/rook
name: rook-config
- mountPath: /etc/ceph
name: default-config-dir
env:
# If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
# If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
- name: ROOK_CURRENT_NAMESPACE_ONLY
value: "false"
# To disable RBAC, uncomment the following:
# - name: RBAC_ENABLED
# value: "false"
# Rook Agent toleration. Will tolerate all taints with all keys.
# Choose between NoSchedule, PreferNoSchedule and NoExecute:
# - name: AGENT_TOLERATION
# value: "NoSchedule"
# (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
# - name: AGENT_TOLERATION_KEY
# value: "<KeyOfTheTaintToTolerate>"
# (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
# - name: AGENT_TOLERATIONS
# value: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/controlplane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) Rook Agent priority class name to set on the pod(s)
# - name: AGENT_PRIORITY_CLASS_NAME
# value: "<PriorityClassName>"
# (Optional) Rook Agent NodeAffinity.
# - name: AGENT_NODE_AFFINITY
# value: "role=storage-node; storage=rook,ceph"
# (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`.
# `Any` uses Ceph admin credentials by default/fallback.
# For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from, and
# set `mountUser` to the Ceph user and `mountSecret` to the name of the Kubernetes secret
# in that namespace.
# - name: AGENT_MOUNT_SECURITY_MODE
# value: "Any"
# Set the path where the Rook agent can find the flex volumes
# - name: FLEXVOLUME_DIR_PATH
# value: "<PathToFlexVolumes>"
# Set the path where kernel modules can be found
# - name: LIB_MODULES_DIR_PATH
# value: "<PathToLibModules>"
# Mount any extra directories into the agent container
# - name: AGENT_MOUNTS
# value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2"
# Rook Discover toleration. Will tolerate all taints with all keys.
# Choose between NoSchedule, PreferNoSchedule and NoExecute:
# - name: DISCOVER_TOLERATION
# value: "NoSchedule"
# (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
# - name: DISCOVER_TOLERATION_KEY
# value: "<KeyOfTheTaintToTolerate>"
# (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
# - name: DISCOVER_TOLERATIONS
# value: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/controlplane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) Rook Discover priority class name to set on the pod(s)
# - name: DISCOVER_PRIORITY_CLASS_NAME
# value: "<PriorityClassName>"
# (Optional) Discover Agent NodeAffinity.
# - name: DISCOVER_AGENT_NODE_AFFINITY
# value: "role=storage-node; storage=rook, ceph"
# Allow rook to create multiple file systems. Note: This is considered
# an experimental feature in Ceph as described at
# http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
# which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
- name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
value: "false"
# The logging level for the operator: INFO | DEBUG
- name: ROOK_LOG_LEVEL
value: "INFO"
# The duration between discovering devices in the rook-discover daemonset.
- name: ROOK_DISCOVER_DEVICES_INTERVAL
value: "60m"
# Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
# This is necessary to work around the anyuid issues when running on OpenShift.
# For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
value: "false"
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
- name: ROOK_ENABLE_SELINUX_RELABELING
value: "true"
# In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
# For more details see https://github.com/rook/rook/issues/2254
- name: ROOK_ENABLE_FSGROUP
value: "true"
# Disable automatic orchestration when new devices are discovered
- name: ROOK_DISABLE_DEVICE_HOTPLUG
value: "false"
# Provide customised regexes as comma-separated values, e.g. a regex for rbd-based volumes would be "(?i)rbd[0-9]+".
# In case of more than one regex, use a comma to separate them.
# The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
# Add a regex expression after a comma to blacklist a disk
# If value is empty, the default regex will be used.
- name: DISCOVER_DAEMON_UDEV_BLACKLIST
value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
# Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
# in favor of the CSI driver.
- name: ROOK_ENABLE_FLEX_DRIVER
value: "false"
# Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
# This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
- name: ROOK_ENABLE_DISCOVERY_DAEMON
value: "true"
# Time to wait until the node controller will move Rook pods to other
# nodes after detecting an unreachable node.
# Pods affected by this setting are:
# mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
# The value used in this variable replaces the default value of 300 secs
# added automatically by k8s as Toleration for
# <node.kubernetes.io/unreachable>
# The total amount of time to reschedule Rook pods in healthy nodes
# before detecting a <not ready node> condition will be the sum of:
# --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
# --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
- name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
value: "5"
# The name of the node to pass with the downward API
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# The pod name to pass with the downward API
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# The pod namespace to pass with the downward API
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# Uncomment it to run lib bucket provisioner in multithreaded mode
#- name: LIB_BUCKET_PROVISIONER_THREADS
# value: "5"
# Uncomment it to run rook operator on the host network
hostNetwork: true
volumes:
- name: rook-config
emptyDir: {}
- name: default-config-dir
emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT
#!/bin/sh -xe
_DIR=$(dirname `readlink -f $0`)
deploy() {
if ! type microk8s; then
sudo snap install microk8s --classic
sudo usermod -a -G microk8s $USER
sudo chown -f -R $USER ~/.kube
su - $USER
microk8s.enable dns
fi
microk8s.kubectl create -f ${_DIR}/rook-common.yaml
microk8s.kubectl create -f ${_DIR}/microk8s-rook-ceph-operator.yaml
microk8s.kubectl create -f ${_DIR}/microk8s-rook-ceph-cluster.yaml
microk8s.kubectl -n rook-ceph get pods -w
if ! grep -q 'CEPH_CONF' ~/.bashrc; then
export CEPH_CONF='/var/lib/rook/rook-ceph/rook-ceph.config'
echo "export CEPH_CONF='/var/lib/rook/rook-ceph/rook-ceph.config'" | tee -a ~/.bashrc
fi
sudo mkdir -p /etc/ceph
if ! [ -f /etc/ceph/ceph.conf ]; then
sudo cp /var/lib/rook/rook-ceph/rook-ceph.config /etc/ceph/ceph.conf
sudo cp /var/lib/rook/rook-ceph/client.admin.keyring /etc/ceph/client.admin.keyring
fi
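# A hedged check (assuming the ceph client package is installed on the host and
# the rook mon is up): the host CLI should now reach the cluster, e.g. `sudo ceph -s`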
}
destroy () {
microk8s.kubectl -n rook-ceph delete cephcluster rook-ceph || true
microk8s.kubectl delete -f ${_DIR}/microk8s-rook-ceph-operator.yaml || true
microk8s.kubectl delete -f ${_DIR}/rook-common.yaml || true
#for CRD in $(microk8s.kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}'); do microk8s.kubectl patch crd -n rook-ceph $CRD --type merge -p '{"metadata":{"finalizers": [null]}}'; done
sudo rm -rf /var/lib/rook
DISKS=$(find /dev | grep "$(awk '/devicePathFilter/{ print $2 }' ${_DIR}/microk8s-rook-ceph-cluster.yaml)")
echo "Zapping $DISKS"
for d in $DISKS; do
sudo sgdisk -Z $d
done
VGS=$(ls /dev/mapper/ceph--*)
for vg in $VGS; do
[ -h $vg ] && sudo dmsetup remove $vg
done
}
case "${1:-h}" in
deploy)
shift
deploy $@
;;
destroy)
shift
destroy $@
;;
-h|--help|help|*)
echo "${0} ( create | destroy )"
esac
default-series: bionic
enable-os-refresh-update: true
enable-os-upgrade: false
logging-config: <root>=DEBUG
###################################################################################################################
# From https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/common.yaml
###################################################################################################################
# Namespace where the operator and other rook resources are created
apiVersion: v1
kind: Namespace
metadata:
name: rook-ceph
# OLM: BEGIN CEPH CRD
# The CRD declarations
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephclusters.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephCluster
listKind: CephClusterList
plural: cephclusters
singular: cephcluster
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
annotations: {}
cephVersion:
properties:
allowUnsupported:
type: boolean
image:
type: string
dashboard:
properties:
enabled:
type: boolean
urlPrefix:
type: string
port:
type: integer
minimum: 0
maximum: 65535
ssl:
type: boolean
dataDirHostPath:
pattern: ^/(\S+)
type: string
disruptionManagement:
properties:
machineDisruptionBudgetNamespace:
type: string
managePodBudgets:
type: boolean
osdMaintenanceTimeout:
type: integer
manageMachineDisruptionBudgets:
type: boolean
skipUpgradeChecks:
type: boolean
continueUpgradeAfterChecksEvenIfNotHealthy:
type: boolean
mon:
properties:
allowMultiplePerNode:
type: boolean
count:
maximum: 9
minimum: 0
type: integer
volumeClaimTemplate: {}
mgr:
properties:
modules:
items:
properties:
name:
type: string
enabled:
type: boolean
network:
properties:
hostNetwork:
type: boolean
provider:
type: string
selectors: {}
storage:
properties:
disruptionManagement:
properties:
machineDisruptionBudgetNamespace:
type: string
managePodBudgets:
type: boolean
osdMaintenanceTimeout:
type: integer
manageMachineDisruptionBudgets:
type: boolean
useAllNodes:
type: boolean
nodes:
items:
properties:
name:
type: string
config:
properties:
metadataDevice:
type: string
storeType:
type: string
pattern: ^(bluestore)$
databaseSizeMB:
type: string
walSizeMB:
type: string
journalSizeMB:
type: string
osdsPerDevice:
type: string
encryptedDevice:
type: string
pattern: ^(true|false)$
useAllDevices:
type: boolean
deviceFilter:
type: string
devicePathFilter:
type: string
devices:
type: array
items:
properties:
name:
type: string
config: {}
resources: {}
type: array
useAllDevices:
type: boolean
deviceFilter:
type: string
devicePathFilter:
type: string
config: {}
storageClassDeviceSets: {}
driveGroups:
type: array
items:
properties:
name:
type: string
spec: {}
placement: {}
required:
- name
- spec
monitoring:
properties:
enabled:
type: boolean
rulesNamespace:
type: string
externalMgrEndpoints:
type: array
items:
properties:
ip:
type: string
removeOSDsIfOutAndSafeToRemove:
type: boolean
external:
properties:
enable:
type: boolean
cleanupPolicy:
properties:
confirmation:
type: string
pattern: ^$|^yes-really-destroy-data$
sanitizeDisks:
properties:
method:
type: string
pattern: ^(complete|quick)$
dataSource:
type: string
pattern: ^(zero|random)$
iteration:
type: integer
format: int32
placement: {}
resources: {}
healthCheck: {}
subresources:
status: {}
additionalPrinterColumns:
- name: DataDirHostPath
type: string
description: Directory used on the K8s nodes
JSONPath: .spec.dataDirHostPath
- name: MonCount
type: string
description: Number of MONs
JSONPath: .spec.mon.count
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
- name: Phase
type: string
description: Phase
JSONPath: .status.phase
- name: Message
type: string
description: Message
JSONPath: .status.message
- name: Health
type: string
description: Ceph Health
JSONPath: .status.ceph.health
# OLM: END CEPH CRD
# OLM: BEGIN CEPH CLIENT CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephclients.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephClient
listKind: CephClientList
plural: cephclients
singular: cephclient
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
caps:
type: object
subresources:
status: {}
# OLM: END CEPH CLIENT CRD
# OLM: BEGIN CEPH RBD MIRROR CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephrbdmirrors.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephRBDMirror
listKind: CephRBDMirrorList
plural: cephrbdmirrors
singular: cephrbdmirror
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
count:
type: integer
minimum: 1
maximum: 100
subresources:
status: {}
# OLM: END CEPH RBD MIRROR CRD
# OLM: BEGIN CEPH FS CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephfilesystems.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephFilesystem
listKind: CephFilesystemList
plural: cephfilesystems
singular: cephfilesystem
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
metadataServer:
properties:
activeCount:
minimum: 1
maximum: 10
type: integer
activeStandby:
type: boolean
annotations: {}
placement: {}
resources: {}
metadataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
minimum: 0
maximum: 10
type: integer
requireSafeReplicaSize:
type: boolean
erasureCoded:
properties:
dataChunks:
minimum: 0
maximum: 10
type: integer
codingChunks:
minimum: 0
maximum: 10
type: integer
compressionMode:
type: string
enum:
- ""
- none
- passive
- aggressive
- force
dataPools:
type: array
items:
properties:
failureDomain:
type: string
replicated:
properties:
size:
minimum: 0
maximum: 10
type: integer
requireSafeReplicaSize:
type: boolean
erasureCoded:
properties:
dataChunks:
minimum: 0
maximum: 10
type: integer
codingChunks:
minimum: 0
maximum: 10
type: integer
compressionMode:
type: string
enum:
- ""
- none
- passive
- aggressive
- force
parameters:
type: object
preservePoolsOnDelete:
type: boolean
additionalPrinterColumns:
- name: ActiveMDS
type: string
description: Number of desired active MDS daemons
JSONPath: .spec.metadataServer.activeCount
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
subresources:
status: {}
# OLM: END CEPH FS CRD
# OLM: BEGIN CEPH NFS CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephnfses.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephNFS
listKind: CephNFSList
plural: cephnfses
singular: cephnfs
shortNames:
- nfs
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
rados:
properties:
pool:
type: string
namespace:
type: string
server:
properties:
active:
type: integer
annotations: {}
placement: {}
resources: {}
subresources:
status: {}
# OLM: END CEPH NFS CRD
# OLM: BEGIN CEPH OBJECT STORE CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectstores.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectStore
listKind: CephObjectStoreList
plural: cephobjectstores
singular: cephobjectstore
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
gateway:
properties:
type:
type: string
sslCertificateRef: {}
port:
type: integer
minimum: 1
maximum: 65535
securePort: {}
instances:
type: integer
externalRgwEndpoints:
type: array
items:
properties:
ip:
type: string
annotations: {}
placement: {}
resources: {}
metadataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
requireSafeReplicaSize:
type: boolean
erasureCoded:
properties:
dataChunks:
type: integer
codingChunks:
type: integer
compressionMode:
type: string
enum:
- ""
- none
- passive
- aggressive
- force
parameters:
type: object
dataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
requireSafeReplicaSize:
type: boolean
erasureCoded:
properties:
dataChunks:
type: integer
codingChunks:
type: integer
compressionMode:
type: string
enum:
- ""
- none
- passive
- aggressive
- force
parameters:
type: object
preservePoolsOnDelete:
type: boolean
healthCheck:
properties:
bucket:
properties:
enabled:
type: boolean
interval:
type: string
subresources:
status: {}
# OLM: END CEPH OBJECT STORE CRD
# OLM: BEGIN CEPH OBJECT STORE USERS CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectstoreusers.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectStoreUser
listKind: CephObjectStoreUserList
plural: cephobjectstoreusers
singular: cephobjectstoreuser
shortNames:
- rcou
- objectuser
scope: Namespaced
version: v1
subresources:
status: {}
# OLM: END CEPH OBJECT STORE USERS CRD
# OLM: BEGIN CEPH OBJECT REALM CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectrealms.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectRealm
listKind: CephObjectRealmList
plural: cephobjectrealms
singular: cephobjectrealm
scope: Namespaced
version: v1
subresources:
status: {}
# OLM: END CEPH OBJECT REALM CRD
# OLM: BEGIN CEPH OBJECT ZONEGROUP CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectzonegroups.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectZoneGroup
listKind: CephObjectZoneGroupList
plural: cephobjectzonegroups
singular: cephobjectzonegroup
scope: Namespaced
version: v1
subresources:
status: {}
# OLM: END CEPH OBJECT ZONEGROUP CRD
# OLM: BEGIN CEPH OBJECT ZONE CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectzones.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectZone
listKind: CephObjectZoneList
plural: cephobjectzones
singular: cephobjectzone
scope: Namespaced
version: v1
subresources:
status: {}
# OLM: END CEPH OBJECT ZONE CRD
# OLM: BEGIN CEPH BLOCK POOL CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephblockpools.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephBlockPool
listKind: CephBlockPoolList
plural: cephblockpools
singular: cephblockpool
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
minimum: 0
maximum: 9
targetSizeRatio:
type: number
requireSafeReplicaSize:
type: boolean
erasureCoded:
properties:
dataChunks:
type: integer
minimum: 0
maximum: 9
codingChunks:
type: integer
minimum: 0
maximum: 9
compressionMode:
type: string
enum:
- ""
- none
- passive
- aggressive
- force
enableRBDStats:
description: EnableRBDStats is used to enable gathering of statistics
for all RBD images in the pool
type: boolean
parameters:
type: object
subresources:
status: {}
# OLM: END CEPH BLOCK POOL CRD
# OLM: BEGIN CEPH VOLUME POOL CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumes.rook.io
spec:
group: rook.io
names:
kind: Volume
listKind: VolumeList
plural: volumes
singular: volume
shortNames:
- rv
scope: Namespaced
version: v1alpha2
subresources:
status: {}
# OLM: END CEPH VOLUME POOL CRD
# OLM: BEGIN OBJECTBUCKET CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: objectbuckets.objectbucket.io
spec:
group: objectbucket.io
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: ObjectBucket
listKind: ObjectBucketList
plural: objectbuckets
singular: objectbucket
shortNames:
- ob
- obs
scope: Cluster
subresources:
status: {}
# OLM: END OBJECTBUCKET CRD
# OLM: BEGIN OBJECTBUCKETCLAIM CRD
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: objectbucketclaims.objectbucket.io
spec:
versions:
- name: v1alpha1
served: true
storage: true
group: objectbucket.io
names:
kind: ObjectBucketClaim
listKind: ObjectBucketClaimList
plural: objectbucketclaims
singular: objectbucketclaim
shortNames:
- obc
- obcs
scope: Namespaced
subresources:
status: {}
# OLM: END OBJECTBUCKETCLAIM CRD
# OLM: BEGIN OBJECTBUCKET ROLEBINDING
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-object-bucket
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-object-bucket
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: rook-ceph
# OLM: END OBJECTBUCKET ROLEBINDING
# OLM: BEGIN OPERATOR ROLE
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-admission-controller
namespace: rook-ceph
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-admission-controller-role
rules:
- apiGroups: ["ceph.rook.io"]
resources: ["*"]
verbs: ["get", "watch", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-admission-controller-rolebinding
subjects:
- kind: ServiceAccount
name: rook-ceph-admission-controller
apiGroup: ""
namespace: rook-ceph
roleRef:
kind: ClusterRole
name: rook-ceph-admission-controller-role
apiGroup: rbac.authorization.k8s.io
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
- apps
- extensions
resources:
- secrets
- pods
- pods/log
- services
- configmaps
- deployments
- daemonsets
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
---
# The role for the operator to manage resources in its own namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: rook-ceph-system
namespace: rook-ceph
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
- services
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- apps
- extensions
resources:
- daemonsets
- statefulsets
- deployments
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- k8s.cni.cncf.io
resources:
- network-attachment-definitions
verbs:
- get
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
# Pod access is needed for fencing
- pods
# Node access is needed for determining nodes where mons should run
- nodes
- nodes/proxy
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
# PVs and PVCs are managed by the Rook provisioner
- persistentvolumes
- persistentvolumeclaims
- endpoints
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- rook.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- policy
- apps
- extensions
resources:
# This is for the clusterdisruption controller
- poddisruptionbudgets
# This is for both clusterdisruption and nodedrain controllers
- deployments
- replicasets
verbs:
- "*"
- apiGroups:
- healthchecking.openshift.io
resources:
- machinedisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- machine.openshift.io
resources:
- machines
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- create
- delete
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-cluster
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
- configmaps
- nodes
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- list
- get
- watch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-object-bucket
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
verbs:
- "*"
resources:
- secrets
- configmaps
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- "objectbucket.io"
verbs:
- "*"
resources:
- "*"
# OLM: END OPERATOR ROLE
# OLM: BEGIN SERVICE ACCOUNT SYSTEM
---
# The rook system service account used by the operator, agent, and discovery pods
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-system
namespace: rook-ceph
labels:
operator: rook
storage-backend: ceph
# imagePullSecrets:
# - name: my-registry-secret
# OLM: END SERVICE ACCOUNT SYSTEM
# OLM: BEGIN OPERATOR ROLEBINDING
---
# Grant the operator, agent, and discovery agents access to resources in the namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-system
namespace: rook-ceph
labels:
operator: rook
storage-backend: ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-system
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: rook-ceph
---
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-global
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: rook-ceph
# OLM: END OPERATOR ROLEBINDING
#################################################################################################################
# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
# and bindings accordingly.
#################################################################################################################
# Service account for the Ceph OSDs. Must exist and cannot be renamed.
# OLM: BEGIN SERVICE ACCOUNT OSD
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-osd
namespace: rook-ceph
# imagePullSecrets:
# - name: my-registry-secret
# OLM: END SERVICE ACCOUNT OSD
# OLM: BEGIN SERVICE ACCOUNT MGR
---
# Service account for the Ceph Mgr. Must exist and cannot be renamed.
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-mgr
namespace: rook-ceph
# imagePullSecrets:
# - name: my-registry-secret
# OLM: END SERVICE ACCOUNT MGR
# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-cmd-reporter
namespace: rook-ceph
# OLM: END CMD REPORTER SERVICE ACCOUNT
# OLM: BEGIN CLUSTER ROLE
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
namespace: rook-ceph
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: [ "get", "list", "watch", "create", "update", "delete" ]
- apiGroups: ["ceph.rook.io"]
resources: ["cephclusters", "cephclusters/finalizers"]
verbs: [ "get", "list", "create", "update", "delete" ]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
---
# Aspects of ceph-mgr that require access to the system namespace
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr
namespace: rook-ceph
rules:
- apiGroups:
- ""
resources:
- pods
- services
- pods/log
verbs:
- get
- list
- watch
- delete
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- "*"
verbs:
- "*"
# OLM: END CLUSTER ROLE
# OLM: BEGIN CMD REPORTER ROLE
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cmd-reporter
namespace: rook-ceph
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
verbs:
- get
- list
- watch
- create
- update
- delete
# OLM: END CMD REPORTER ROLE
# OLM: BEGIN CLUSTER ROLEBINDING
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cluster-mgmt
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: rook-ceph
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: rook-ceph
---
# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-mgr
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: rook-ceph
---
# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-system
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-system
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: rook-ceph
---
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-cluster
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: rook-ceph
---
# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: rook-ceph
# OLM: END CLUSTER ROLEBINDING
# OLM: BEGIN CMD REPORTER ROLEBINDING
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cmd-reporter
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-cmd-reporter
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: rook-ceph
# OLM: END CMD REPORTER ROLEBINDING
#################################################################################################################
# Beginning of pod security policy resources. The example will assume the cluster will be created in the
# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify
# the roles and bindings accordingly.
#################################################################################################################
# OLM: BEGIN CLUSTER POD SECURITY POLICY
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
# Note: Kubernetes matches PSPs to deployments alphabetically. In some environments, this PSP may
# need to be renamed with a value that will match before others.
name: 00-rook-privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: true
allowedCapabilities:
# required by CSI
- SYS_ADMIN
# fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group
fsGroup:
rule: RunAsAny
# runAsUser, supplementalGroups - Rook needs to run some pods as root
# Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
# seLinux - seLinux context is unknown ahead of time; set if this is well-known
seLinux:
rule: RunAsAny
volumes:
# recommended minimum set
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
- projected
# required for Rook
- hostPath
- flexVolume
# allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
# allowedHostPaths:
# - pathPrefix: "/run/udev" # for OSD prep
# readOnly: false
# - pathPrefix: "/dev" # for OSD prep
# readOnly: false
# - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to
# readOnly: false
# Ceph requires host IPC for setting up encrypted devices
hostIPC: true
# Ceph OSDs need to share the same PID namespace
hostPID: true
# hostNetwork can be set to 'false' if host networking isn't used
hostNetwork: true
hostPorts:
# Ceph messenger protocol v1
- min: 6789
max: 6790 # <- support old default port
# Ceph messenger protocol v2
- min: 3300
max: 3300
# Ceph RADOS ports for OSDs, MDSes
- min: 6800
max: 7300
# # Ceph dashboard port HTTP (not recommended)
# - min: 7000
# max: 7000
# Ceph dashboard port HTTPS
- min: 8443
max: 8443
# Ceph mgr Prometheus Metrics
- min: 9283
max: 9283
# OLM: END CLUSTER POD SECURITY POLICY
# OLM: BEGIN POD SECURITY POLICY BINDINGS
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: 'psp:rook'
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- 00-rook-privileged
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-default-psp
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: default
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-osd-psp
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-mgr-psp
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-cmd-reporter-psp
namespace: rook-ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: rook-ceph
# OLM: END CLUSTER POD SECURITY POLICY BINDINGS
# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-plugin-sa
namespace: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-provisioner-sa
namespace: rook-ceph
# OLM: END CSI CEPHFS SERVICE ACCOUNT
# OLM: BEGIN CSI CEPHFS ROLE
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: rook-ceph
name: cephfs-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
# OLM: END CSI CEPHFS ROLE
# OLM: BEGIN CSI CEPHFS ROLEBINDING
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
namespace: rook-ceph
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: rook-ceph
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
# OLM: END CSI CEPHFS ROLEBINDING
# OLM: BEGIN CSI CEPHFS CLUSTER ROLE
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
# OLM: END CSI CEPHFS CLUSTER ROLE
# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: rook-ceph
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: rook-ceph
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: rook-ceph
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
# OLM: END CSI CEPHFS CLUSTER ROLEBINDING
# OLM: BEGIN CSI RBD SERVICE ACCOUNT
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-plugin-sa
namespace: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-provisioner-sa
namespace: rook-ceph
# OLM: END CSI RBD SERVICE ACCOUNT
# OLM: BEGIN CSI RBD ROLE
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: rook-ceph
name: rbd-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
# OLM: END CSI RBD ROLE
# OLM: BEGIN CSI RBD ROLEBINDING
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
namespace: rook-ceph
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: rook-ceph
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
# OLM: END CSI RBD ROLEBINDING
# OLM: BEGIN CSI RBD CLUSTER ROLE
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete", "get", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
# OLM: END CSI RBD CLUSTER ROLE
# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: rook-ceph
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: rook-ceph
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: rook-ceph
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: rook-ceph
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
# OLM: END CSI RBD CLUSTER ROLEBINDING
#!/bin/sh -xe
_DIR=$(dirname `readlink -f $0`)
sudo snap install lxd
cat ${_DIR}/lxd-ceph-preseed.yaml | lxd init --preseed
cat ${_DIR}/default.yaml | lxc profile edit default
# Note: If not using an apt-http-proxy then remove references to it.
LXDBR_IP=$(lxc network get lxdbr0 ipv4.address | head -c-4)
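# A less fragile alternative (does not assume a two-digit prefix length):
# LXDBR_IP=$(lxc network get lxdbr0 ipv4.address | cut -d/ -f1)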
juju bootstrap --debug --no-gui --model-default ${_DIR}/etc/juju-config/model-default.yaml --model-default apt-http-proxy=http://${LXDBR_IP}:8000 localhost lxd-localhost
cat ${_DIR}/default.yaml | lxc profile edit juju-default
JUJU_MODEL=${1:-'lxd-localhost:default'}
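# ${JUJU_MODEL:+...} expands to "--model=$JUJU_MODEL" when JUJU_MODEL is non-empty, and to nothing otherwise.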
_juju_model_arg="${JUJU_MODEL:+--model=$JUJU_MODEL}"
juju deploy $_juju_model_arg ./bundle-openstack.yaml
# for compat w/ stsstack bundles
cat <<EOF > ~/novarc
_juju_model_arg="${_juju_model_arg}" . ${_DIR}/novarc
EOF
[ -d ~/images ] || ln -s ${_DIR}/simplestreams/images ~/images
# configure ceph-proxy
./configure_ceph_proxy.sh
# wait for model to converge
# mostly works by accident
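# Note: if the juju-wait plugin is installed, `juju-wait -m $JUJU_MODEL` would be a more robust way to wait for the model to settle.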
while juju status $_juju_model_arg | grep -q 'blocked\|maintenance\|waiting\|executing'; do
    sleep 150
done
if juju status $_juju_model_arg | grep -q 'error\|unknown\|terminated'; then
    echo "Problem with the model. Review before continuing."
fi
# prevent overcloud models from wedging the setup by limiting the CPU consumption of nova-compute under load
NOVA_COMPUTE_MACHINE=$(juju status --format json | jq -r '.applications["nova-compute"]["units"]["nova-compute/0"]["machine"]')
NOVA_COMPUTE_INSTANCE=$(juju status --format json | jq -r ".machines[\"${NOVA_COMPUTE_MACHINE}\"][\"instance-id\"]")
lxc config set ${NOVA_COMPUTE_INSTANCE} limits.cpu.allowance 90%
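# Note: a percentage value for limits.cpu.allowance is a soft limit, enforced only when the host CPU is under contention.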
juju run-action glance-simplestreams-sync/0 sync-images --wait
./configure_openstack.sh $JUJU_MODEL
./juju_bootstrap.sh uc $JUJU_MODEL
cat <<EOF
This install should be compatible w/ stsstack bundles.
Remember to set NAMESERVER and SWIFT_IP to relevant values before calling ./configure in stsstack bundles.
EOF