(This procedure should work with any storage class; a Rook-Ceph external cluster is used as the example here.)
cat << EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block-ext
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the Rook cluster is running
  clusterID: rook-ceph-external
  # Ceph pool into which the RBD image shall be created
  pool: <an available pool>
  # RBD image format. Defaults to "2".
  imageFormat: "2"
  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only the `layering` feature.
  imageFeatures: layering
  # The secrets contain Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-external
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-external
  # Specify the filesystem type of the volume. If not specified, csi-provisioner
  # will default to `ext4`. Note that `xfs` is not recommended due to a potential deadlock
  # in hyperconverged settings where the volume is mounted on the same node as the OSDs.
  csi.storage.k8s.io/fstype: ext4
# Retain the RBD volume when the PVC is deleted, so the data can be recovered later
reclaimPolicy: Retain
allowVolumeExpansion: true
EOF
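Verify that the class exists and carries the Retain reclaim policy (shown in the RECLAIMPOLICY column in recent kubectl versions):
$ kubectl get storageclass rook-ceph-block-ext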
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-ext
  labels:
    app: nginx
spec:
  storageClassName: rook-ceph-block-ext
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
EOF
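If the claim stays Pending, the provisioner's events usually explain why (wrong pool name, missing secrets, and so on):
$ kubectl describe pvc ceph-ext
Otherwise it should bind within a few seconds: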
$ kubectl get pvc | grep ceph
ceph-ext Bound pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 3Gi RWO rook-ceph-block-ext 13s
$ kubectl get pv | grep ceph
pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 3Gi RWO Retain Bound default/ceph-ext rook-ceph-block-ext 6s
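Before relying on the recovery flow below, double-check that the PV really inherited the Retain policy from the storage class:
$ kubectl get pv pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 -o jsonpath='{.spec.persistentVolumeReclaimPolicy}'
Retain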
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: nginx-test
spec:
  volumes:
    - name: mystorage
      persistentVolumeClaim:
        claimName: ceph-ext
  containers:
    - name: task-pv-container
      image: nginx:1.23.1
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: mystorage
EOF
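Give the pod a moment to start before exec'ing into it:
$ kubectl wait --for=condition=Ready pod/nginx-test --timeout=120s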
$ kubectl exec -it nginx-test -- /bin/bash
root@nginx-test:/usr/share/nginx/html# echo "some data and unique number @@@1" >> /usr/share/nginx/html/alex.txt
root@nginx-test:/usr/share/nginx/html# ls
alex.txt  lost+found
root@nginx-test:/usr/share/nginx/html# cat /usr/share/nginx/html/alex.txt
some data and unique number @@@1
Take a copy of the PVC manifest and keep it in a safe place (alternatively, use Velero with Restic to take a backup):
kubectl get pvc ceph-ext -o yaml > backup-ceph-ext.yaml
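Note that this raw dump includes server-populated fields (resourceVersion, uid, status, the bind annotations). If the kubectl create step below rejects the file because of those fields, strip them first. A minimal cleanup sketch, assuming yq v4 is installed; it deliberately keeps spec.volumeName, which is what rebinds the claim to the retained PV:
# Strip server-populated metadata and status from the backed-up manifest
yq 'del(.metadata.resourceVersion, .metadata.uid, .metadata.creationTimestamp, .metadata.annotations, .metadata.finalizers, .status)' \
  backup-ceph-ext.yaml > backup-ceph-ext-clean.yaml
If you needed this step, create the claim from backup-ceph-ext-clean.yaml later instead.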
$ kubectl delete pod nginx-test
pod "nginx-test" deleted
$ kubectl delete pvc ceph-ext
persistentvolumeclaim "ceph-ext" deleted
$ kubectl get pv | grep ceph
pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 3Gi RWO Retain Released default/ceph-ext rook-ceph-block-ext 10m
Patch the PV to clear its claimRef, so that it becomes available for binding again:
$ kubectl patch pv pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 -p '{"spec":{"claimRef": null}}'
persistentvolume/pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 patched
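With the claimRef cleared, the PV should move from Released back to Available:
$ kubectl get pv pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 -o jsonpath='{.status.phase}'
Available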
Re-create the claim from the backup:
$ kubectl create -f backup-ceph-ext.yaml
Check that the claim is Bound to the same volume as before:
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-ext Bound pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 3Gi RWO rook-ceph-block-ext 5s
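To be extra sure nothing was re-provisioned, you can also compare the backing RBD image recorded on the PV before and after the exercise (the attribute name below is what current Ceph-CSI versions use; treat this as a sketch):
$ kubectl get pv pvc-21318da7-4cba-4029-975c-4a3fbd9160a8 -o jsonpath='{.spec.csi.volumeAttributes.imageName}'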
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: nginx-test
spec:
  volumes:
    - name: mystorage
      persistentVolumeClaim:
        claimName: ceph-ext
  containers:
    - name: task-pv-container
      image: docker-registry-remote.artifactory-espoo1.int.net.nokia.com/nginx:1.23.1
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: mystorage
EOF
$ kubectl exec -it nginx-test -- /bin/bash
root@nginx-test:/usr/share/nginx/html# ls
alex.txt  lost+found
root@nginx-test:/usr/share/nginx/html# cat /usr/share/nginx/html/alex.txt
some data and unique number @@@1
Yes, it is: the file written before the PVC was deleted is still there, with the same contents.