Ceph installation on closed-network nodes, keeping firewalld and SELinux enabled. OS: CentOS 8 Stream, Ceph version: Pacific (v16.2.9)
#!/bin/bash
###########################
# Information
# Host 1 (node1): 172.22.4.101 - admin
# host 2 (node2): 172.22.4.102
# host 3 (node3): 172.22.4.103
# host 4 (node4): 172.22.4.104
# host 5 (node5): 172.22.4.105
###########################
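###########################
# Prerequisites (assumed by this script):
# - Run as root on node1 (the admin node), while internet access is still available.
# - /tmp/nodepw.txt must hold the root password of the slave nodes and
#   /tmp/clientpw.txt the root password of the client; the expect blocks
#   below read them for ssh-copy-id.
###########################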
###########################
# Edit this section manually for your environment
DASHBOARD_USER_ID='admin'
DASHBOARD_USER_PASSWORD='qwer1234'
node1='172.22.4.101'
node2='172.22.4.102'
node3='172.22.4.103'
node4='172.22.4.104'
node5='172.22.4.105'
client='192.168.9.22'
adminName='node1'
adminIP=${node1}
all_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
all_hosts_IP=( ${node1} ${node2} ${node3} ${node4} ${node5} )
slave_IPname=( "node2" "node3" "node4" "node5" )
slave_IPs=( ${node2} ${node3} ${node4} ${node5} )
mon_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
mon_hosts_IP=( ${node1} ${node2} ${node3} ${node4} ${node5} )
mgr_hosts_name=( "node2" "node3" )
osd_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
prometheus_hosts_name=( "node5" )
grafana_hosts_name=( "node4" )
nodeexporter_hosts_name=( "node1" "node2" "node3" "node4" "node5" )
mds_hosts_name=( "node3" "node4" "node5" )
rgw_hosts_name=( "node1" "node2" )
###########################
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to view message
printf "${YELLOW}$1${NC}\n"
}
alert_noNewline() { # Helper to view message
printf "${YELLOW}$1${NC}"
}
complete() {
printf "${YELLOW}Complete!\n\n${NC}"
}
for i in ${!slave_IPs[@]}
do
echo "${slave_IPs[$i]} ${slave_IPname[$i]}" >> /etc/hosts
done
dnf install -y expect # Command line automation tool
# Passwordless SSH access to the other nodes (hosts)
expect -c "
spawn ssh-keygen -t rsa
expect \"Enter file in which to save the key\"
send \"\n\"
expect \"Enter passphrase\"
send \"\n\"
expect \"Enter same passphrase again\"
send \"\n\"
expect eof
"
for slaveIP in ${slave_IPs[@]}
do
expect -c "
set timeout 5
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${slaveIP}
expect \"yes/no\"
send \"yes\n\"
expect \"password:\"
send \"$(cat /tmp/nodepw.txt)\n\"
expect eof
"
done
expect -c "
set timeout 5
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${client}
expect \"yes/no\"
send \"yes\n\"
expect \"password:\"
send \"$(cat /tmp/clientpw.txt)\n\"
expect eof
"
rm -rf /tmp/ceph_install
# Download RPM packages for the closed network
# 1. Add Repositories
cat <<EOF | tee /etc/yum.repos.d/ceph.repo >> /dev/null
[Ceph]
name=Ceph \$basearch
baseurl=https://download.ceph.com/rpm-16.2.5/el8/\$basearch
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[Ceph-noarch]
name=Ceph noarch
baseurl=https://download.ceph.com/rpm-16.2.5/el8/noarch
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[Ceph-source]
name=Ceph SRPMS
baseurl=https://download.ceph.com/rpm-16.2.5/el8/SRPMS
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
EOF
dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
# 2. Download all Packages with all dependencies
dnf update -y
dnf install -y git make podman openssl
dnf download -y --downloadonly --resolve --alldeps --downloaddir=/tmp/ceph_install/ceph_packages podman chrony lvm2 cephadm ceph-common
# 3. Download Podman Images as .tar
podman pull docker.io/ceph/ceph:v16.2.5
podman pull docker.io/ceph/ceph-grafana:6.7.4
podman pull docker.io/prom/prometheus:v2.37.0-rc.0
podman pull docker.io/prom/alertmanager:v0.24.0
podman pull docker.io/prom/node-exporter:v1.3.0
mkdir -p /tmp/ceph_install/ceph_podman_images/
podman save -o /tmp/ceph_install/ceph_podman_images/ceph.tar docker.io/ceph/ceph:v16.2.5
podman save -o /tmp/ceph_install/ceph_podman_images/ceph-grafana.tar docker.io/ceph/ceph-grafana:6.7.4
podman save -o /tmp/ceph_install/ceph_podman_images/prometheus.tar docker.io/prom/prometheus:v2.37.0-rc.0
podman save -o /tmp/ceph_install/ceph_podman_images/alertmanager.tar docker.io/prom/alertmanager:v0.24.0
podman save -o /tmp/ceph_install/ceph_podman_images/node-exporter.tar docker.io/prom/node-exporter:v1.3.0
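# Optional check: all five image archives should now be present
ls -lh /tmp/ceph_install/ceph_podman_images/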
# 4. Host local Podman Registry
cd /tmp/ &&\
git clone -b 5.0 https://github.com/tmax-cloud/install-registry.git &&\
cd install-registry &&\
sed -i "s/DNS.1 = localhost/DNS.1 = ${all_hosts_name[0]}/" trust/cert.conf &&\
sed -i "s/IP.1 = 127.0.0.1/IP.1 = ${adminIP}/" trust/cert.conf &&\
make trust <<EOF # Answers fed to the certificate prompts (presumably country, org, OU, and common name)
KR
TmaxCloud
DevOps
${all_hosts_name[0]}
EOF
IP=${adminIP} PORT=5000 make install
sed -i "s/unqualified-search-registries = \[\"registry.fedoraproject.org\", \"registry.access.redhat.com\", \"registry.centos.org\", \"docker.io\"\]/unqualified-search-registries = \[\"${adminIP}:5000\", \"registry.fedoraproject.org\", \"registry.access.redhat.com\", \"registry.centos.org\", \"docker.io\"\]/" /etc/containers/registries.conf
echo "[[registry]]" >> /etc/containers/registries.conf
echo "location=\"${adminIP}:5000\"" >> /etc/containers/registries.conf
echo "insecure=true" >> /etc/containers/registries.conf
# 5. Upload images to registry
cd /tmp/ceph_install/ceph_podman_images/
podman load -i ceph.tar
podman load -i ceph-grafana.tar
podman load -i prometheus.tar
podman load -i alertmanager.tar
podman load -i node-exporter.tar
podman tag docker.io/ceph/ceph:v16.2.5 ${adminIP}:5000/ceph/ceph:v16.2.5
podman tag docker.io/ceph/ceph-grafana:6.7.4 ${adminIP}:5000/ceph/ceph-grafana:6.7.4
podman tag docker.io/prom/prometheus:v2.37.0-rc.0 ${adminIP}:5000/prom/prometheus:v2.37.0-rc.0
podman tag docker.io/prom/alertmanager:v0.24.0 ${adminIP}:5000/prom/alertmanager:v0.24.0
podman tag docker.io/prom/node-exporter:v1.3.0 ${adminIP}:5000/prom/node-exporter:v1.3.0
podman push ${adminIP}:5000/ceph/ceph:v16.2.5
podman push ${adminIP}:5000/ceph/ceph-grafana:6.7.4
podman push ${adminIP}:5000/prom/prometheus:v2.37.0-rc.0
podman push ${adminIP}:5000/prom/alertmanager:v0.24.0
podman push ${adminIP}:5000/prom/node-exporter:v1.3.0
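# Optional check (assumes the registry serves TLS on port 5000, as set up by
# install-registry): the catalog should list all five repositories.
curl -ks https://${adminIP}:5000/v2/_catalog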
# 6. Authentication for podman
htpasswd -Bbn ${DASHBOARD_USER_ID} ${DASHBOARD_USER_PASSWORD} > /etc/containers/registries.d/.htpasswd # htpasswd is provided by httpd-tools
cat <<EOF | tee /tmp/ceph_install/ceph_podman_images/registry_file >> /dev/null
{
"url":"${adminIP}:5000",
"username":"${DASHBOARD_USER_ID}",
"password":"${DASHBOARD_USER_PASSWORD}"
}
EOF
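# This JSON is consumed by "cephadm bootstrap --registry-json" below, so every
# node can authenticate against the local registry.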
###############################
######## OFFLINE START ########
###############################
for slaveIP in ${slave_IPs[@]}
do
scp -r /tmp/ceph_install/ root@${slaveIP}:/tmp/
ssh -f root@${slaveIP} "\
echo 'false' > /tmp/ceph_ready_status.txt &&\
dnf install -y /tmp/ceph_install/ceph_packages/*.rpm &&\
sed -i 's/unqualified-search-registries = \[\"registry.fedoraproject.org\", \"registry.access.redhat.com\", \"registry.centos.org\", \"docker.io\"\]/unqualified-search-registries = \[\"${adminIP}:5000\", \"registry.fedoraproject.org\", \"registry.access.redhat.com\", \"registry.centos.org\", \"docker.io\"\]/' /etc/containers/registries.conf &&\
echo '[[registry]]' >> /etc/containers/registries.conf &&\
echo 'location=\"${adminIP}:5000\"' >> /etc/containers/registries.conf &&\
echo 'insecure=true' >> /etc/containers/registries.conf &&\
systemctl restart chronyd &&\
echo 'true' > /tmp/ceph_ready_status.txt
"
done
rm -f ~/cluster_spec.yaml
# Add hosts
for i in ${!slave_IPs[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: host
hostname: ${slave_IPname[$i]}
addr: ${slave_IPs[$i]}
EOF
done
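# Each host entry rendered above looks like:
# ---
# service_type: host
# hostname: node2
# addr: 172.22.4.102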
# Service customization
# service: mon
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mon
service_id: mon
placement:
  hosts:
EOF
for hostname in ${mon_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
# service: mgr
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mgr
service_id: mgr
placement:
  hosts:
EOF
for hostname in ${mgr_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
# service: osd
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: osd
service_id: default_drive_group
placement:
  hosts:
EOF
for hostname in ${osd_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
data_devices:
  all: true
EOF
# service: prometheus
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: prometheus
service_id: prometheus
placement:
  hosts:
EOF
for hostname in ${prometheus_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
# service: grafana
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: grafana
service_id: grafana
placement:
  hosts:
EOF
for hostname in ${grafana_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: alertmanager
service_id: alertmanager
placement:
  count: 3
EOF
# service: node-exporter
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: node-exporter
service_id: node-exporter
placement:
  hosts:
EOF
for hostname in ${nodeexporter_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
# service: mds
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: mds
service_id: myfs
placement:
  hosts:
EOF
for hostname in ${mds_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
# service: rgw
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
---
service_type: rgw
service_id: rgw
placement:
  hosts:
EOF
for hostname in ${rgw_hosts_name[@]}
do
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
  - ${hostname}
EOF
done
cat <<EOF | tee -a ~/cluster_spec.yaml >> /dev/null
spec:
  rgw_frontend_port: 8080
EOF
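# Review the generated service spec before bootstrapping
cat ~/cluster_spec.yaml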
dnf install -y /tmp/ceph_install/ceph_packages/*.rpm
# Wait until every slave node reports ready
alert "Check slave nodes ready"
for i in ${!slave_IPs[@]}
do
alert_noNewline "${slave_IPname[$i]}(${slave_IPs[$i]}) "
until [[ $(ssh root@${slave_IPs[$i]} 'cat /tmp/ceph_ready_status.txt 2>/dev/null') = true ]]; do sleep 1; done
alert "is READY"
done
# All Hosts are ready.
podman restart registry
podman login -u ${DASHBOARD_USER_ID} -p ${DASHBOARD_USER_PASSWORD} ${adminIP}:5000 # podman login needs the registry URL
cephadm --image ${adminIP}:5000/ceph/ceph:v16.2.5 bootstrap --mon-ip ${node1} --registry-json /tmp/ceph_install/ceph_podman_images/registry_file --apply-spec ~/cluster_spec.yaml
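# Optional checks once bootstrap returns: cluster health and applied services
ceph -s
ceph orch ls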
# Configure Images registry
ceph config set mgr mgr/cephadm/container_image_prometheus ${adminIP}:5000/prom/prometheus:v2.37.0-rc.0
ceph config set mgr mgr/cephadm/container_image_node_exporter ${adminIP}:5000/prom/node-exporter:v1.3.0
ceph config set mgr mgr/cephadm/container_image_alertmanager ${adminIP}:5000/prom/alertmanager:v0.24.0
ceph config set mgr mgr/cephadm/container_image_grafana ${adminIP}:5000/ceph/ceph-grafana:6.7.4
ceph orch redeploy alertmanager
ceph orch redeploy grafana
ceph orch redeploy prometheus
ceph orch redeploy node-exporter
for slaveIP in ${slave_IPs[@]}
do
ssh root@${slaveIP} "cephadm registry-login --registry-url ${adminIP}:5000 --registry-username '${DASHBOARD_USER_ID}' --registry-password '${DASHBOARD_USER_PASSWORD}'"
done
podman restart registry
# Set up the dashboard
dnf install -y ceph-mgr-dashboard # Install the Ceph dashboard module
ceph mgr module enable dashboard
ceph mgr services # Shows the IP and port of the dashboard
ceph dashboard ac-user-delete admin # Drop the bootstrap-created admin user
rm -f ~/password.txt
echo "${DASHBOARD_USER_PASSWORD}" > ~/password.txt
# ceph dashboard ac-user-create USERNAME [PASSWORD] [ROLENAME] [NAME] [EMAIL]
ceph dashboard ac-user-create ${DASHBOARD_USER_ID} -i ~/password.txt administrator admin john@example.com # Set user ID and password
# CEPH RBD (Block Device)
poolname='mypool'
block_device_user_name='tyeolrik'
imagename='my_default_image'
ceph osd pool create ${poolname}
rbd pool init ${poolname}
ceph auth get-or-create client.${block_device_user_name} mon 'profile rbd' osd "profile rbd pool=${poolname}" mgr "profile rbd pool=${poolname}"
rbd create --size 102400 ${poolname}/${imagename} # 102400 MiB = 100 GiB
rbd info ${poolname}/${imagename}
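# A client holding client.${block_device_user_name}'s keyring could then map and
# use the image; a sketch (not executed here, device name may differ):
# rbd map ${poolname}/${imagename} --id ${block_device_user_name}
# mkfs.ext4 /dev/rbd0 && mount /dev/rbd0 /mnt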
# CEPH CEPHFS
ceph osd pool create cephfs_data &&\
ceph osd pool create cephfs_metadata &&\
ceph fs new cephfs cephfs_metadata cephfs_data &&\
ceph fs ls
ceph mds stat
# HEALTH_WARN -> HEALTH_OK
# If the warning is "* daemons have recently crashed", archive the crash reports:
# ceph crash archive-all
# Mount with key
IFS="=" read key value <<< $(cat /etc/ceph/ceph.client.admin.keyring | grep 'key')
mkdir -p /mnt/mycephfs/
mount -t ceph "$(IFS=','; echo "${mon_hosts_IP[*]}")":/ /mnt/mycephfs -o name=admin,secret="$value"
cd /mnt/mycephfs
echo "HELLO WORLD!" >> /mnt/mycephfs/testfile
cat /mnt/mycephfs/testfile
umount /mnt/mycephfs
cd ~
# CEPH RGW
read -r user access_key secret_key < <(echo $(radosgw-admin user create --uid=johndoe --display-name="John Doe" --email=john@example.com | jq -r '.keys[0].user, .keys[0].access_key, .keys[0].secret_key'))
# The remaining S3 steps run on the client (192.168.9.22) over SSH
ssh root@${client} "mkdir -p ~/.aws && printf '[default]\naws_access_key_id = ${access_key}\naws_secret_access_key = ${secret_key}' >> ~/.aws/credentials"
# Install s5cmd
ssh root@${client} "rm -f s5cmd* && dnf install -y git tar && wget https://github.com/peak/s5cmd/releases/download/v1.4.0/s5cmd_1.4.0_Linux-64bit.tar.gz && mkdir -p ~/s5cmd && tar -xvzf s5cmd_1.4.0_Linux-64bit.tar.gz -C ~/s5cmd/"
# Test Start
portNumber=$(ceph orch ls --format=json-pretty | jq -r '.[] | select(.service_type=="rgw") | .status.ports[0]')
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} mb s3://testbucket" # Make Bucket
ssh root@${client} "echo 'Hello TyeolRik' >> /tmp/testObject" # Make Test File
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} cp /tmp/testObject s3://testbucket" # Send File
ssh root@${client} "~/s5cmd/s5cmd --endpoint-url http://${node1}:${portNumber} cp s3://testbucket/testObject /tmp/objectFromS3" # Get File (Download)
ssh root@${client} "cat /tmp/objectFromS3" # Read File
#!/bin/bash
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to view message
printf "${YELLOW}$1${NC}\n"
}
complete() {
printf "${YELLOW}Complete!\n\n${NC}"
}
echo 'false' > /tmp/ceph_ready_status.txt
# Install needed packages
# One requirement: Podman or Docker for running containers
alert "Install needed packages" &&\
dnf update -y &&\
dnf install -y kernel-devel kernel-header* make gcc elfutils-libelf-devel git lvm2 epel-release tar httpd yum-utils jq podman &&\
complete
# Install Ceph - Pacific
# https://docs.ceph.com/en/quincy/cephadm/install/#distribution-specific-installations
dnf install -y centos-release-ceph-pacific &&\
dnf install -y cephadm &&\
cephadm add-repo --release pacific &&\
cephadm install ceph-common &&\
cephadm prepare-host &&\
ceph -v
echo 'true' > /tmp/ceph_ready_status.txt