- 3 cluster nodes (8 GB RAM or more)
- Kubernetes 1.19+
- Helm 3.2.0+
- lvm2 package (apt install lvm2 -y)
- An additional vdb disk attached to each VM (no partitions)
Note: the additional disk is not mandatory; installation also works with the default sda or vda device.
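Before installing, it can help to confirm on each node that lvm2 is present and that the extra disk is attached and empty. A minimal check, assuming the extra disk is /dev/vdb:
dpkg -s lvm2 | grep Status     # Debian/Ubuntu; use rpm -q lvm2 on RHEL/CentOS
lsblk -f /dev/vdb              # should list no filesystem or partitions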
ceph operator
helm repo add rook-release https://charts.rook.io/release
helm repo update
helm install rook-ceph rook-release/rook-ceph \
--create-namespace \
--namespace rook-ceph \
--set csi.nfs.enabled=true \
--set enableDiscoveryDaemon=true
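Before creating the cluster, it is worth waiting until the operator pod is Running; assuming the pod carries the chart's app=rook-ceph-operator label:
kubectl -n rook-ceph get pods -l app=rook-ceph-operator
kubectl -n rook-ceph wait --for=condition=Ready pod -l app=rook-ceph-operator --timeout=300s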
ceph cluster
For virtual machines, change deviceFilter to 'vd[a-z]'.
For monitoring, leave monitoring.enabled=false here and update it to true after the Ceph cluster is up and Prometheus is installed (see the helm upgrade example in the Prometheus, Grafana section below).
helm install rook-ceph-cluster rook-release/rook-ceph-cluster \
--create-namespace \
--namespace rook-ceph \
--set operatorNamespace=rook-ceph \
--set toolbox.enabled=true \
--set monitoring.enabled=false \
--set cephClusterSpec.storage.useAllNodes=true,cephClusterSpec.storage.useAllDevices=true \
--set cephClusterSpec.storage.deviceFilter='sd[a-z]'
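Provisioning the mons and OSDs can take several minutes; the CephCluster resource created by the chart (named rook-ceph, as referenced in the cleanup section below) reports overall health:
kubectl -n rook-ceph get cephcluster rook-ceph -w
kubectl -n rook-ceph get pods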
When deploying OSDs on specific nodes:
helm install rook-ceph-cluster rook-release/rook-ceph-cluster \
--create-namespace \
--namespace rook-ceph \
--set operatorNamespace=rook-ceph \
--set toolbox.enabled=true \
--set monitoring.enabled=false \
--set cephClusterSpec.storage.useAllNodes=false,cephClusterSpec.storage.useAllDevices=false \
--set cephClusterSpec.storage.deviceFilter='vdb' \
--set cephClusterSpec.storage.nodes[0].name=node1 \
--set cephClusterSpec.storage.nodes[0].devices[0].name=vdb \
--set cephClusterSpec.storage.nodes[0].devices[0].config.deviceClass=hdd \
--set cephClusterSpec.storage.nodes[0].devices[0].config.storeType=bluestore \
--set cephClusterSpec.storage.nodes[1].name=node2 \
--set cephClusterSpec.storage.nodes[1].devices[0].name=vdb \
--set cephClusterSpec.storage.nodes[1].devices[0].config.deviceClass=hdd \
--set cephClusterSpec.storage.nodes[1].devices[0].config.storeType=bluestore \
--set cephClusterSpec.storage.nodes[2].name=node3 \
--set cephClusterSpec.storage.nodes[2].devices[0].name=vdb \
--set cephClusterSpec.storage.nodes[2].devices[0].config.deviceClass=hdd \
--set cephClusterSpec.storage.nodes[2].devices[0].config.storeType=bluestore \
--set cephClusterSpec.dashboard.port=8443 \
--set cephClusterSpec.dashboard.ssl=false
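With more nodes, the long --set chain becomes unwieldy; the same settings can be kept in a values file instead. A sketch equivalent to the flags above (the file name values-cluster.yaml is just an example):
cat <<'EOF' > values-cluster.yaml
operatorNamespace: rook-ceph
toolbox:
  enabled: true
monitoring:
  enabled: false
cephClusterSpec:
  dashboard:
    port: 8443
    ssl: false
  storage:
    useAllNodes: false
    useAllDevices: false
    deviceFilter: vdb
    nodes:
      - name: node1            # repeat this entry for node2 and node3
        devices:
          - name: vdb
            config:
              deviceClass: hdd
              storeType: bluestore
EOF
helm install rook-ceph-cluster rook-release/rook-ceph-cluster \
--create-namespace \
--namespace rook-ceph \
-f values-cluster.yaml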
ceph-toolbox
- pod exec
kubectl exec -it pod/$(kubectl get pods -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -n rook-ceph -- /bin/bash
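Inside the toolbox, the standard Ceph CLI is available; for example:
ceph status
ceph osd tree
ceph df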
ceph-dashboard-external configuration
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: rook-ceph-mgr-dashboard-external-https
  namespace: rook-ceph
  labels:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
spec:
  ports:
    - name: dashboard
      port: 8443
      protocol: TCP
      targetPort: 8443
      nodePort: 32070
  selector:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
  sessionAffinity: None
  type: NodePort
EOF
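With the NodePort service applied, the dashboard should be reachable on port 32070 of any node (use http:// instead of https:// if the dashboard was installed with ssl=false):
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "https://${NODE_IP}:32070"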
Login
- Username: admin
- Password:
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo
Ceph orchestrator configuration
TOOLS_POD=pod/$(kubectl get pods -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}')
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph mgr module enable rook
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph orch set backend rook
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph orch status
CephNFS configuration
cat <<'EOF' | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
  # The name of the Ganesha server cluster to create. It will be reflected in
  # the name(s) of the Ganesha server pod(s).
  name: ceph-filesystem
  # The namespace of the Rook cluster where the Ganesha server cluster is created.
  namespace: rook-ceph
spec:
  rados:
    pool: ceph-filesystem-data0
    namespace: nfs-ns
  server:
    active: 3
    placement: {}
    resources: {}
EOF
TOOLS_POD=pod/$(kubectl get pods -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}')
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph dashboard set-ganesha-clusters-rados-pool-namespace ceph-filesystem-data0/nfs-ns
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph dashboard feature status
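To verify that the Ganesha server pods started (assuming they carry Rook's app=rook-ceph-nfs label):
kubectl -n rook-ceph get cephnfs
kubectl -n rook-ceph get pods -l app=rook-ceph-nfs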
How to mount
- CentOS: yum install nfs-utils
- Ubuntu: apt install nfs-common
Adjust the IP below to match your environment.
mount -t nfs -o port=2049 $(hostname -I | awk '{print $1}'):/cephfs /mnt/rook
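To make the mount persistent across reboots, an /etc/fstab entry along these lines can be used (replace <NFS_SERVER_IP> with the address of one of the Ganesha servers):
mkdir -p /mnt/rook
echo "<NFS_SERVER_IP>:/cephfs /mnt/rook nfs defaults,_netdev,port=2049 0 0" >> /etc/fstab
mount -a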
How to use the API
- Address: https://192.168.160.235:32070/api/auth (adjust IP:PORT for your environment)
- Documentation: https://docs.ceph.com/en/latest/mgr/ceph_api/
curl -X POST "https://example.com:8443/api/auth" \
-H "Accept: application/vnd.ceph.api.v1.0+json" \
-H "Content-Type: application/json" \
-d '{"username": <username>, "password": <password>}'
Prometheus, Grafana configuration
TOOLS_POD=pod/$(kubectl get pods -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}')
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph dashboard set-prometheus-api-host http://prometheus-server.telemetry.svc.cluster.local
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph dashboard set-alertmanager-api-host http://prometheus-alertmanager.telemetry.svc.cluster.local:9093
kubectl -n rook-ceph exec -it $TOOLS_POD -- ceph dashboard set-grafana-api-url http://grafana.telemetry.svc.cluster.local
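Once Prometheus is running, the monitoring flag mentioned in the cluster section can be flipped without reinstalling, for example:
helm upgrade rook-ceph-cluster rook-release/rook-ceph-cluster \
--namespace rook-ceph \
--reuse-values \
--set monitoring.enabled=true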
Checking rook-ceph resources
kubectl api-resources --verbs=list --namespaced -o name \
| xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph
Cleanup
helm uninstall rook-ceph -n rook-ceph
helm uninstall rook-ceph-cluster -n rook-ceph
kubectl -n rook-ceph patch configmap/rook-ceph-mon-endpoints -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch secret/rook-ceph-mon -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch cephblockpool.ceph.rook.io/ceph-blockpool -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch cephcluster.ceph.rook.io/rook-ceph -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch cephfilesystem.ceph.rook.io/ceph-filesystem -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch cephnfs.ceph.rook.io/ceph-filesystem -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl -n rook-ceph patch cephobjectstore.ceph.rook.io/ceph-objectstore -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl delete ns rook-ceph --force
Run the following on every node:
DISK="/dev/vdb"
sgdisk --zap-all $DISK
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
blkdiscard $DISK
rm -rf /var/lib/rook
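If ceph-volume created LVM mappings on the disk, they may also need to be removed; the Rook teardown guide lists commands along these lines:
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
rm -rf /dev/ceph-*
rm -rf /dev/mapper/ceph--*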
Single Node