Create /etc/ceph/ceph.conf
[global]
auth cluster required = none
auth service required = none
auth client required = none
[osd]
osd journal size = 1000
filestore xattr use omap = true
[osd.0]
host = n54l
[osd.1]
host = n54l
[osd.2]
host = n54l
[osd.3]
host = n54l
[mon]
mon debug dump transactions = false
[mon.a]
host = n54l
mon addr = 192.168.1.10:6789
[mds.a]
host = n54l
mkdir -p /var/run/ceph
mkdir -p /var/lib/ceph/mon/ceph-a
# dummy keyring
touch /etc/ceph/keyring
touch /var/lib/ceph/mon/ceph-a/keyring
ceph-mon -i a --mkfs -f --fsid `uuidgen`
# NOTE: append -f to run in the foreground for debugging
ceph-mon -i a
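At this point the cluster status can be checked (it will report unhealthy until osds are added):
ceph -s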
There are currently 4 hdds; assume they are /dev/sdW, /dev/sdX, /dev/sdY and /dev/sdZ.
Use ext4, and set the labels to osd0~osd3:
# use fdisk to create 1 partition on each disk, then format it
mkfs.ext4 -m 0 -b 4096 -L osdN /dev/sdX1
Add the following entries to /etc/fstab:
LABEL=osd0 /var/lib/ceph/osd/ceph-0 ext4 noatime,user_xattr 0 1
LABEL=osd1 /var/lib/ceph/osd/ceph-1 ext4 noatime,user_xattr 0 1
LABEL=osd2 /var/lib/ceph/osd/ceph-2 ext4 noatime,user_xattr 0 1
LABEL=osd3 /var/lib/ceph/osd/ceph-3 ext4 noatime,user_xattr 0 1
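Then create the mount points and mount everything; a small sketch using the fstab entries above:
for id in `seq 0 3`; do
    mkdir -p /var/lib/ceph/osd/ceph-${id}
    mount /var/lib/ceph/osd/ceph-${id}
done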
NOTE: with ceph-mon running:
ceph osd create
ceph osd create
ceph osd create
ceph osd create
ceph-osd -i 0 --mkfs --mkjournal -f
ceph-osd -i 1 --mkfs --mkjournal -f
ceph-osd -i 2 --mkfs --mkjournal -f
ceph-osd -i 3 --mkfs --mkjournal -f
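Then start each osd; a sketch assuming per-daemon Gentoo init scripts named like the ceph-osd.N script used in the replacement section below:
for id in `seq 0 3`; do
    /etc/init.d/ceph-osd.${id} start
done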
Commands to dump, edit, and reinstall the crush map:
ceph osd getcrushmap -o crush.bin
crushtool -d crush.bin -o crush.txt
# edit crush.txt to look like the final crush map below
crushtool -c crush.txt -o crush.bin
ceph osd setcrushmap -i crush.bin
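To confirm the new map took effect, dump the osd tree (the same check used in the replacement section below):
ceph osd tree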
Final crush map:
# begin crush map
# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
# types
type 0 osd
type 1 host
type 2 rack
type 3 row
type 4 room
type 5 datacenter
type 6 root
# buckets
host n54l {
id -2
alg straw
hash 0
item osd.0 weight 2.0
item osd.1 weight 2.0
item osd.2 weight 2.0
item osd.3 weight 2.0
}
root default {
id -1 # do not change unnecessarily
# weight 0.000
alg straw
hash 0 # rjenkins1
item n54l weight 8.0
}
# rules
rule data {
ruleset 0
type replicated
min_size 1
max_size 3
step take default
step chooseleaf firstn 0 type osd
step emit
}
rule metadata {
ruleset 1
type replicated
min_size 2
max_size 4
step take default
step chooseleaf firstn 0 type osd
step emit
}
rule rbd {
ruleset 2
type replicated
min_size 1
max_size 3
step take default
step chooseleaf firstn 0 type osd
step emit
}
rule data_r3 {
ruleset 3
type replicated
min_size 2
max_size 4
step take default
step chooseleaf firstn 0 type osd
step emit
}
rule rbd_r3 {
ruleset 4
type replicated
min_size 2
max_size 4
step take default
step chooseleaf firstn 0 type osd
step emit
}
# end crush map
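Optionally, sanity-check the compiled map before installing it; a sketch using crushtool's test mode (rule 0 with 2 replicas here is an arbitrary choice):
crushtool -i crush.bin --test --rule 0 --num-rep 2 --show-statistics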
Create the new filesystem on the metadata and data pools, then start the mds:
ceph mds newfs <id-of-metadata-pool> <id-of-data-pool> --yes-i-really-mean-it
ceph-mds -i a
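A quick check that the mds came up:
ceph mds stat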
After all setup is done, add the ceph services to the default runlevel, for example:
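A sketch for Gentoo's OpenRC; the per-daemon service names (ceph-mon.a, ceph-osd.N, ceph-mds.a) are assumed to match the init scripts used elsewhere in this note:
rc-update add ceph-mon.a default
for id in `seq 0 3`; do rc-update add ceph-osd.${id} default; done
rc-update add ceph-mds.a default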
NOTE: do this with ceph-mon running.
ref: http://ceph.com/docs/master/rados/operations/authentication/
ceph auth get-or-create client.admin \
    mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/keyring
# for mon
ceph-authtool --create-keyring --gen-key -n mon. /tmp/mon.key
# NOTE, copy to all mon hosts
cp /tmp/mon.key /var/lib/ceph/mon/ceph-a/keyring
rm /tmp/mon.key
# for every osd
for id in `seq 0 3`; do
    mkdir -p /var/lib/ceph/osd/ceph-${id}
    ceph auth get-or-create osd.${id} \
        mon 'allow rwx' osd 'allow *' \
        -o /var/lib/ceph/osd/ceph-${id}/keyring
done
# for mds.a
mkdir -p /var/lib/ceph/mds/ceph-a
ceph auth get-or-create mds.a \
    mon 'allow rwx' osd 'allow *' mds 'allow *' \
    -o /var/lib/ceph/mds/ceph-a/keyring
Then, edit /etc/ceph/ceph.conf, change 'none' to 'cephx', and restart ceph-mon.
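To confirm the keys are registered and cephx is in effect:
ceph auth list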
The data and rbd pools need 2 replicas; this was already handled in the setup steps above.
# each pool with 64 pgs
ceph osd pool create data_r3 64
ceph osd pool create rbd_r3 64
ceph osd pool create data_tmp 64
ceph osd pool create rbd_tmp 64
# set rep size to 3
ceph osd pool set data_r3 size 3
ceph osd pool set rbd_r3 size 3
# set tmp pool size to 1
ceph osd pool set data_tmp size 1
ceph osd pool set rbd_tmp size 1
# add the new pools as cephfs data pools
ceph mds add_data_pool <pool-id-of-data_r3>
ceph mds add_data_pool <pool-id-of-data_tmp>
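The crush map above also defines ruleset 3 (data_r3) and ruleset 4 (rbd_r3); presumably the matching pools should be pointed at them. A sketch using the old-style crush_ruleset pool setting (the pairing is an assumption about the intent):
ceph osd pool set data_r3 crush_ruleset 3
ceph osd pool set rbd_r3 crush_ruleset 4
# verify with
ceph osd dump | grep pool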
ref: http://ceph.com/docs/master/rados/operations/add-or-rm-mons/
First of all, SSH to the host that the new monitor will run on.
# if auth is disabled, create an empty file
touch /etc/ceph/keyring
# create dir
mkdir -p /var/lib/ceph/mon/ceph-{mon-id}
cd /var/lib/ceph/mon/ceph-{mon-id}
touch keyring
# create a tmp dir for temp files
mkdir tmp
# this step may fail if auth is disabled
ceph auth get mon. -o tmp/auth
ceph mon getmap -o tmp/map
# create the mon fs; if auth is disabled, remove the --keyring parameter
ceph-mon -i {mon-id} --mkfs --monmap tmp/map --keyring tmp/auth
Then, edit /etc/ceph/ceph.conf and add a mon.{mon-id} entry:
[mon.{mon-id}]
host = new-mon-host
addr = ip-addr:6789
Then add the new monitor to the current ceph cluster, and start it:
ceph mon add {mon-id} {ip}[:{port}]
ceph-mon -i {mon-id} --public-addr {ip:port}
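A quick check that the new monitor joined the quorum:
ceph mon stat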
ref: http://ceph.com/docs/master/rados/operations/add-or-rm-osds
Assume the osd that needs to be replaced is osd.{osd_num}, and its disk is /dev/sdX.
First of all, mark the osd out and delete it from the crush map:
ceph osd out {osd_num}
ceph osd crush remove osd.{osd_num}
# use the command below to wait until ceph becomes clean again
ceph -w
Now delete the osd itself. NOTE: this step is important.
ceph osd rm osd.{osd_num}
Then stop the osd daemon.
# for Gentoo
/etc/init.d/ceph-osd.{osd_num} stop
Now umount /dev/sdX1, stop the disk using 'scsiadd -r', and replace it with the new disk.
First, we create a new osd; unsurprisingly, the osd number should be the same as before.
ceph osd create
Then format the new disk (see the format section above) and mount it on /var/lib/ceph/osd/ceph-{osd_num}. NOTE: mount with -o user_xattr.
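A minimal sketch, assuming the label follows the osdN scheme from the format section:
mkfs.ext4 -m 0 -b 4096 -L osd{osd_num} /dev/sdX1
mount -o noatime,user_xattr LABEL=osd{osd_num} /var/lib/ceph/osd/ceph-{osd_num}
Then create the fs layout: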
ceph-osd -i {osd_num} --mkfs
Then register the osd auth key as necessary.
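If cephx is enabled, this mirrors the per-osd command from the authentication section:
ceph auth get-or-create osd.{osd_num} \
    mon 'allow rwx' osd 'allow *' \
    -o /var/lib/ceph/osd/ceph-{osd_num}/keyring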
Now, add the osd back to the crush map:
ceph osd crush add osd.{osd_num} {weight} {location}
# for example:
ceph osd crush add osd.1 2 root=default host=n54l
# where osd.1 is the osd name, 2 is the weight, and root=default host=n54l is the location
# use 'ceph osd tree' to dump the current osd tree
Start the osd now (for Gentoo):
/etc/init.d/ceph-osd.1 start
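As before, watch until the cluster becomes clean again while data backfills onto the new disk:
ceph -w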