Ceph installation while keeping firewalld and SELinux enabled. OS: CentOS 8 Stream, Ceph version: Pacific (v16.2.9)
#!/bin/bash
###########################
# Information
# Host 1 (node1): 172.22.4.101 - admin
# host 2 (node2): 172.22.4.102
# host 3 (node3): 172.22.4.103
# host 4 (node4): 172.22.4.104
# host 5 (node5): 172.22.4.105
###########################
###########################
# This section is for setup manually
DASHBOARD_USER_ID='admin'
DASHBOARD_USER_PASSWORD='qwer1234'
node1='172.22.4.101'
node2='172.22.4.102'
node3='172.22.4.103'
node4='172.22.4.104'
node5='172.22.4.105'
client='192.168.9.22'
all_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
all_hosts_IP=( ${node1} ${node2} ${node3} ${node4} ${node5} )
slave_IPname=( "node2" "node3" "node4" "node5" )
slave_IPs=( ${node2} ${node3} ${node4} ${node5} )
mon_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
mon_hosts_IP=( ${node1} ${node2} ${node3} ${node4} ${node5} )
mgr_hosts_name=( "node2" "node3" )
osd_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
prometheus_hosts_name=( "node5" )
grafana_hosts_name=( "node4" )
nodeexporter_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
mds_hosts_name=( "node3" "node4" "node5" )
rgw_hosts_name=( "node1" "node2" )
###########################
# Run a script on a remote host in the background, e.g.:
# ssh -f root@172.22.4.102 "sh test.sh"
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to print a highlighted message
    printf "${YELLOW}$1${NC}\n"
}
alert_noNewline() { # Same as alert, without the trailing newline
    printf "${YELLOW}$1${NC}"
}
complete() {
    printf "${YELLOW}Complete!\n\n${NC}"
}
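# Optional sanity check: echo the planned role layout so a typo in the arrays above shows up early.
alert "MON: ${mon_hosts_name[*]} / MGR: ${mgr_hosts_name[*]} / OSD: ${osd_hosts_name[*]} / MDS: ${mds_hosts_name[*]} / RGW: ${rgw_hosts_name[*]}"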
dnf install -y expect # Command line automation tool
# Set up passwordless SSH to the other nodes (hosts)
expect -c "
spawn ssh-keygen -t rsa
expect \"Enter file in which to save the key\"
send \"\n\"
expect \"Enter passphrase\"
send \"\n\"
expect \"Enter same passphrase again\"
send \"\n\"
expect eof
"
for slaveIP in ${slave_IPs[@]}
do
expect -c "
set timeout 5
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${slaveIP}
expect \"yes/no\"
send \"yes\n\"
expect \"password:\"
send \"$(cat /tmp/nodepw.txt)\n\"
expect eof
"
done
expect -c "
set timeout 5
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${client}
expect \"yes/no\"
send \"yes\n\"
expect \"password:\"
send \"$(cat /tmp/clientpw.txt)\n\"
expect eof
"
for slaveIP in ${slave_IPs[@]}
do
ssh -f root@${slaveIP} "\
wget --no-check-certificate --no-cache --no-cookies https://gist.github.com/TyeolRik/f6155260f66fce41744c44f457f4b805/raw/CEPH_Install_For_Slaves.sh &&\
sh CEPH_Install_For_Slaves.sh &&\
rm -f CEPH_Install_For_Slaves.sh \
"
done
# Install required packages
# One of the requirements: Podman or Docker for running containers
alert "Install required packages" &&\
dnf update -y &&\
dnf install -y kernel-devel kernel-header* make gcc elfutils-libelf-devel git lvm2 epel-release tar httpd yum-utils jq podman chrony &&\
complete
# Install Ceph - Pacific
# https://docs.ceph.com/en/quincy/cephadm/install/#distribution-specific-installations
dnf install -y centos-release-ceph-pacific &&\
dnf install -y cephadm &&\
cephadm add-repo --release pacific &&\
cephadm install ceph-common &&\
cephadm prepare-host &&\
ceph -v
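# Optional sanity check: cephadm can verify that this host satisfies its prerequisites (podman, chrony, etc.).
cephadm check-host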
rm -f ~/cluster_spec.yaml
# Add hosts
for i in ${!slave_IPs[@]}
do
# ssh-copy-id -f -i /etc/ceph/ceph.pub root@${slave_IPs[$i]}
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: host
addr: ${slave_IPs[$i]}
hostname: ${slave_IPname[$i]}
EOF
done
# Customize service placement
# service: mon
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mon
service_id: mon
placement:
  hosts:
EOF
for hostname in ${mon_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: mgr
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mgr
service_id: mgr
placement:
  hosts:
EOF
for hostname in ${mgr_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: osd
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: osd
service_id: default_drive_group
placement:
  hosts:
EOF
for hostname in ${osd_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
data_devices:
  all: true
EOF
# service: prometheus
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: prometheus
service_id: prometheus
placement:
  hosts:
EOF
for hostname in ${prometheus_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: grafana
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: grafana
service_id: grafana
placement:
  hosts:
EOF
for hostname in ${grafana_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: alertmanager
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: alertmanager
service_id: alertmanager
placement:
  count: 3
EOF
# service: node-exporter
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: node-exporter
service_id: node-exporter
placement:
  hosts:
EOF
for hostname in ${nodeexporter_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: mds
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mds
service_id: myfs
placement:
  hosts:
EOF
for hostname in ${mds_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
# service: rgw
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: rgw
service_id: rgw
placement:
  hosts:
EOF
for hostname in ${rgw_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
    - ${hostname}
EOF
done
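# Review the assembled spec before bootstrapping; each "---" document describes one service placement.
cat ~/cluster_spec.yaml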
# Wait until every slave node finishes its setup script
alert "Checking that slave nodes are ready"
for i in ${!slave_IPs[@]}
do
alert_noNewline "${slave_IPname[$i]}(${slave_IPs[$i]}) "
until [[ $(ssh root@${slave_IPs[$i]} 'cat /tmp/ceph_ready_status.txt 2>/dev/null') = true ]]; do sleep 1; done
alert "is READY"
done
# All hosts are ready: bootstrap the cluster and apply the spec
cephadm bootstrap --mon-ip ${node1} --apply-spec ~/cluster_spec.yaml
ceph orch apply osd --all-available-devices
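# Quick verification: every host should be listed, and the services from the spec should be scheduled.
ceph orch host ls
ceph orch ls
ceph -s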
# Set up the dashboard
dnf install -y ceph-mgr-dashboard # Install the Ceph dashboard module
ceph mgr module enable dashboard
ceph mgr services # Shows the dashboard URL (IP and port)
ceph dashboard ac-user-delete ${DASHBOARD_USER_ID} # Delete the user created at bootstrap so it can be recreated with our own password
rm -f ~/password.txt
echo "${DASHBOARD_USER_PASSWORD}" > ~/password.txt &&\
# ceph dashboard ac-user-create USERNAME [PASSWORD] [ROLENAME] [NAME] [EMAIL]
ceph dashboard ac-user-create ${DASHBOARD_USER_ID} -i ~/password.txt administrator admin john@example.com # Set user ID and password
# Oddly, to access the Object Gateway page in the Ceph dashboard,
# the "uid" of the radosgw-admin user must be "admin", not "dashboard" or anything else. The reason is unclear.
# radosgw-admin user create --uid=admin --display-name=admin --system
# ceph dashboard set-rgw-api-access-key -i accesskey # get accesskey from radosgw-admin user info --uid=admin
# ceph dashboard set-rgw-api-secret-key -i secretkey # get secretkey from radosgw-admin user info --uid=admin
# ceph dashboard set-rgw-api-admin-resource admin
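# A minimal sketch of the commented steps above, left commented like the originals
# (assumes the system user "admin" from the first commented command exists; jq pulls the keys from the JSON):
# radosgw-admin user info --uid=admin | jq -r '.keys[0].access_key' > /tmp/rgw_accesskey
# radosgw-admin user info --uid=admin | jq -r '.keys[0].secret_key' > /tmp/rgw_secretkey
# ceph dashboard set-rgw-api-access-key -i /tmp/rgw_accesskey
# ceph dashboard set-rgw-api-secret-key -i /tmp/rgw_secretkey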
ceph config set mon mon_allow_pool_delete true # Allow pool deletion from the mon (equivalent: ceph tell mon.\* injectargs '--mon-allow-pool-delete=true')
# CEPH RBD (Block Device)
poolname='mypool'
block_device_user_name='tyeolrik'
imagename='my_default_image'
ceph osd pool create ${poolname}
rbd pool init ${poolname}
ceph auth get-or-create client.${block_device_user_name} mon 'profile rbd' osd "profile rbd pool=${poolname}" mgr "profile rbd pool=${poolname}" -o /etc/ceph/ceph.client.${block_device_user_name}.keyring # Save the keyring so this user can authenticate later
rbd create --size 1024 ${poolname}/${imagename} # 1 GB
rbd info ${poolname}/${imagename}
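# Optional sketch: map the image with the kernel RBD client. Left commented because it assumes
# the client.${block_device_user_name} keyring saved above and a kernel that supports the image's features.
# rbd map ${poolname}/${imagename} --id ${block_device_user_name}
# rbd showmapped
# rbd unmap ${poolname}/${imagename}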
# CEPH CEPHFS
ceph osd pool create cephfs_data &&\
ceph osd pool create cephfs_metadata &&\
ceph fs new cephfs cephfs_metadata cephfs_data &&\
ceph fs ls
ceph mds stat
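# The kernel mount below needs an active MDS; wait until "up:active" appears in the mds stat output.
until ceph mds stat | grep -q 'up:active'; do sleep 2; done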
# If status shows HEALTH_WARN with "daemons have recently crashed", archiving the crash
# reports returns the cluster to HEALTH_OK:
# ceph crash archive-all
# Mount with key
# Extract the admin secret from the keyring (line format: "key = <secret>")
secret=$(awk '$1 == "key" {print $3}' /etc/ceph/ceph.client.admin.keyring)
mkdir -p /mnt/mycephfs/
mount -t ceph "$(IFS=','; echo "${mon_hosts_IP[*]}")":/ /mnt/mycephfs -o name=admin,secret="${secret}"
cd /mnt/mycephfs
echo "HELLO WORLD!" >> /mnt/mycephfs/testfile
cat /mnt/mycephfs/testfile
cd /mnt/
umount /mnt/mycephfs
cd ~
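# For a persistent CephFS mount, an /etc/fstab entry would look roughly like this
# (secret kept in a root-only file instead of inline; the secretfile path is an assumption):
# 172.22.4.101,172.22.4.102,172.22.4.103,172.22.4.104,172.22.4.105:/ /mnt/mycephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev 0 0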
# CEPH RGW
# Create an RGW user and capture its S3 credentials in one pass (jq extracts uid, access key, and secret key from the JSON output)
read user access_key secret_key < <(echo $(radosgw-admin user create --uid=johndoe --display-name="John Doe" --email=john@example.com | jq -r '.keys[0].user, .keys[0].access_key, .keys[0].secret_key'))
# Next, configure the client (192.168.9.22) with the new S3 credentials
ssh root@${client} "mkdir -p ~/.aws && printf '[default]\naws_access_key_id = ${access_key}\naws_secret_access_key = ${secret_key}' >> ~/.aws/credentials"
# Install s5cmd
ssh root@${client} "rm -rf s5cmd* && dnf install -y git tar && wget https://github.com/peak/s5cmd/releases/download/v1.4.0/s5cmd_1.4.0_Linux-64bit.tar.gz && mkdir -p ~/s5cmd && tar -xvzf s5cmd_1.4.0_Linux-64bit.tar.gz -C ~/s5cmd/"
# Test Start
portNumber=$(ceph orch ls --format=json-pretty | jq -r '.[] | select(.service_type=="rgw") | .status.ports[0]')
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} mb s3://testbucket" # Make Bucket
ssh root@${client} "echo 'Hello TyeolRik' >> /tmp/testObject" # Make Test File
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} cp /tmp/testObject s3://testbucket" # Send File
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} cp s3://testbucket/testObject /tmp/objectFromS3" # Get File (Download)
ssh root@${client} "cat /tmp/objectFromS3" # Read File
#!/bin/bash
###########################
# CEPH_Install_For_Slaves.sh
# Runs on every slave node; fetched and executed by the admin script above.
###########################
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to print a highlighted message
    printf "${YELLOW}$1${NC}\n"
}
complete() {
    printf "${YELLOW}Complete!\n\n${NC}"
}
echo 'false' > /tmp/ceph_ready_status.txt
# Time Sync
systemctl restart chronyd
# Easy passwordless SSH to the other nodes (hosts)
# slave_IPs must be defined in this script too; the values mirror the admin script above.
slave_IPs=( 172.22.4.102 172.22.4.103 172.22.4.104 172.22.4.105 )
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa # Non-interactive key generation
for slaveIP in ${slave_IPs[@]}
do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@${slaveIP} # Still prompts for each node's root password
done
# Install required packages
# One of the requirements: Podman or Docker for running containers
alert "Install required packages" &&\
dnf update -y &&\
dnf install -y kernel-devel kernel-header* make gcc elfutils-libelf-devel git lvm2 epel-release tar httpd yum-utils jq podman &&\
complete
# Install Ceph - Pacific
# https://docs.ceph.com/en/quincy/cephadm/install/#distribution-specific-installations
dnf install -y centos-release-ceph-pacific &&\
dnf install -y cephadm &&\
cephadm add-repo --release pacific &&\
cephadm install ceph-common &&\
cephadm prepare-host &&\
ceph -v
echo 'true' > /tmp/ceph_ready_status.txt