@mrngm
Created December 10, 2019 12:30
root@node1:~# ceph versions
{
    "mon": {
        "ceph version 14.2.4 (75f4de193b3ea58512f204623e6c5a16e6c1e1ba) nautilus (stable)": 3
    },
    "mgr": {
        "ceph version 14.2.4 (75f4de193b3ea58512f204623e6c5a16e6c1e1ba) nautilus (stable)": 2
    },
    "osd": {
        "ceph version 14.2.4 (75f4de193b3ea58512f204623e6c5a16e6c1e1ba) nautilus (stable)": 4
    },
    "mds": {},
    "rgw": {
        "ceph version 14.2.4 (75f4de193b3ea58512f204623e6c5a16e6c1e1ba) nautilus (stable)": 1
    },
    "overall": {
        "ceph version 14.2.4 (75f4de193b3ea58512f204623e6c5a16e6c1e1ba) nautilus (stable)": 10
    }
}
root@node1:~# ceph -s
  cluster:
    id:     77b0f639-26c6-4d18-a41d-90599c28ca05
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node3,node5 (age 12d)
    mgr: node2(active, since 6d), standbys: node4
    osd: 4 osds: 4 up (since 12d), 4 in (since 12d)
    rgw: 1 daemon active (node1)

  data:
    pools:   7 pools, 296 pgs
    objects: 229 objects, 192 KiB
    usage:   3.2 GiB used, 6.8 GiB / 10 GiB avail
    pgs:     296 active+clean
root@node1:~# ceph osd tree
ID  CLASS WEIGHT  TYPE NAME                   STATUS REWEIGHT PRI-AFF
 -1       0.00970 root default
-16       0.00970     datacenter nijmegen
 -3       0.00388         host node2
  0   hdd 0.00388             osd.0               up  1.00000 1.00000
 -5       0.00388         host node3
  1   hdd 0.00388             osd.1               up  1.00000 1.00000
 -7       0.00098         host node4
  2   ssd 0.00098             osd.2               up  1.00000 1.00000
 -9       0.00098         host node5
  3   ssd 0.00098             osd.3               up  1.00000 1.00000
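The CRUSH tree has a single datacenter bucket (nijmegen) under the default root, holding two hdd-backed and two ssd-backed hosts; device classes are detected automatically when the OSDs are created. A hierarchy like this could have been built with the standard bucket commands; a sketch, not part of the captured session:

ceph osd crush add-bucket nijmegen datacenter
ceph osd crush move nijmegen root=default
ceph osd crush move node2 datacenter=nijmegen
ceph osd crush move node3 datacenter=nijmegen
ceph osd crush move node4 datacenter=nijmegen
ceph osd crush move node5 datacenter=nijmegen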
root@node1:~# ceph osd pool ls detail
pool 1 'tier1-ssd' replicated size 2 min_size 1 crush_rule 1 object_hash rjenkins pg_num 128 pgp_num 128 autoscale_mode warn last_change 110 lfor 0/0/57 flags hashpspool stripe_width 0 target_size_bytes 900000000 application rgw
pool 2 'tier2-hdd' replicated size 1 min_size 1 crush_rule 2 object_hash rjenkins pg_num 128 pgp_num 128 autoscale_mode warn last_change 111 lfor 0/0/60 flags hashpspool stripe_width 0 target_size_bytes 2800000000 application rgw
pool 3 '.rgw.root' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 79 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.control' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 100 flags hashpspool stripe_width 0 application rgw
pool 5 'default.rgw.meta' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 95 flags hashpspool stripe_width 0 application rgw
pool 6 'default.rgw.log' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 92 flags hashpspool stripe_width 0 application rgw
pool 7 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 109 flags hashpspool stripe_width 0 application rgw
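tier1-ssd and tier2-hdd are the two RGW data pools, pinned to crush_rule 1 and 2 (dumped below); note that tier2-hdd runs with size 1, i.e. without replication. A sketch of how pools like these could be created, assuming the CRUSH rules already exist:

ceph osd pool create tier1-ssd 128 128 replicated nijmegen-ssd-dup
ceph osd pool set tier1-ssd size 2
ceph osd pool set tier1-ssd target_size_bytes 900000000
ceph osd pool create tier2-hdd 128 128 replicated nijmegen-hdd-dup
ceph osd pool set tier2-hdd size 1
ceph osd pool set tier2-hdd target_size_bytes 2800000000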
root@node1:~# ceph osd crush rule dump
[
    {
        "rule_id": 0,
        "rule_name": "replicated_rule",
        "ruleset": 0,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -1,
                "item_name": "default"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    },
    {
        "rule_id": 1,
        "rule_name": "nijmegen-ssd-dup",
        "ruleset": 1,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -18,
                "item_name": "nijmegen~ssd"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    },
    {
        "rule_id": 2,
        "rule_name": "nijmegen-hdd-dup",
        "ruleset": 2,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -17,
                "item_name": "nijmegen~hdd"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    }
]
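Rules 1 and 2 take the device-class shadow roots nijmegen~ssd and nijmegen~hdd (items -18 and -17), so pools using them map only to OSDs of the matching class inside the nijmegen datacenter. Class-aware replicated rules like these can be created in one step; a sketch, not part of the captured session:

ceph osd crush rule create-replicated nijmegen-ssd-dup nijmegen host ssd
ceph osd crush rule create-replicated nijmegen-hdd-dup nijmegen host hdd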
root@node1:~# ceph osd pool application get
{
    "tier1-ssd": {
        "rgw": {}
    },
    "tier2-hdd": {
        "rgw": {}
    },
    ".rgw.root": {
        "rgw": {}
    },
    "default.rgw.control": {
        "rgw": {}
    },
    "default.rgw.meta": {
        "rgw": {}
    },
    "default.rgw.log": {
        "rgw": {}
    },
    "default.rgw.buckets.index": {
        "rgw": {}
    }
}
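All seven pools carry the rgw application tag. RGW creates and tags its own pools (.rgw.root and the default.rgw.* pools) on first use; the two custom tier pools would have been tagged by hand, along these lines:

ceph osd pool application enable tier1-ssd rgw
ceph osd pool application enable tier2-hdd rgw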
root@node1:~# radosgw-admin zonegroup placement list
[
    {
        "key": "default-placement",
        "val": {
            "name": "default-placement",
            "tags": [],
            "storage_classes": [
                "SPINNING_RUST",
                "STANDARD"
            ]
        }
    }
]
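The default-placement target carries two storage classes: the built-in STANDARD and the custom SPINNING_RUST. A new class is first registered at the zonegroup level; a sketch of the likely command:

radosgw-admin zonegroup placement add \
      --rgw-zonegroup default \
      --placement-id default-placement \
      --storage-class SPINNING_RUST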
root@node1:~# radosgw-admin zone placement list
[
    {
        "key": "default-placement",
        "val": {
            "index_pool": "default.rgw.buckets.index",
            "storage_classes": {
                "SPINNING_RUST": {
                    "data_pool": "tier2-hdd"
                },
                "STANDARD": {
                    "data_pool": "tier1-ssd"
                }
            },
            "data_extra_pool": "default.rgw.buckets.non-ec",
            "index_type": 0
        }
    }
]
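At the zone level each storage class is then mapped to a data pool: STANDARD to tier1-ssd and SPINNING_RUST to tier2-hdd, sharing a single index pool. A sketch of how these mappings are typically set (radosgw usually needs a restart afterwards to pick them up):

radosgw-admin zone placement modify \
      --rgw-zone default \
      --placement-id default-placement \
      --storage-class STANDARD \
      --data-pool tier1-ssd
radosgw-admin zone placement add \
      --rgw-zone default \
      --placement-id default-placement \
      --storage-class SPINNING_RUST \
      --data-pool tier2-hdd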
root@node1:~# rados -p tier1-ssd ls
ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1_darthvader.png
ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1_2019-10-15-090436_1254x522_scrubbed.png
ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1_kanariepiet.jpg
root@node1:~# rados -p tier2-hdd ls
ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1__shadow_.FEruUOZaVJXJcOG-e2tO1xcInNzoEvN_0
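Note where the bytes ended up: darthvader.png was stored as SPINNING_RUST (see the s3cmd output below), yet its head object sits in tier1-ssd; only its tail data, the _shadow_ chunk, landed in tier2-hdd. RGW keeps head objects in the placement target's STANDARD data pool regardless of storage class, and the two STANDARD objects are small enough to fit entirely in their heads, so they produce no shadow chunks at all. To double-check where an object's data lives, the pieces can be stat'ed directly (sketch):

rados -p tier1-ssd stat ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1_darthvader.png
rados -p tier2-hdd stat ce2fc9ee-edc8-4dc7-a3fe-b1458c67168b.5805.1__shadow_.FEruUOZaVJXJcOG-e2tO1xcInNzoEvN_0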
client $ s3cmd info s3://bucket/kanariepiet.jpg
s3://bucket/kanariepiet.jpg (object):
   File size: 37839
   Last mod:  Tue, 10 Dec 2019 08:09:58 GMT
   MIME type: image/jpeg
   Storage:   STANDARD
   MD5 sum:   c67af22181135db6e3c547f7a31db738
   SSE:       none
   Policy:    none
   CORS:      none
   ACL:       tst: FULL_CONTROL
client $ s3cmd info s3://bucket/darthvader.png
s3://bucket/darthvader.png (object):
   File size: 156591
   Last mod:  Wed, 04 Dec 2019 10:35:14 GMT
   MIME type: image/png
   Storage:   SPINNING_RUST
   MD5 sum:   ce44a43a01ab7d512685902174b65065
   SSE:       none
   Policy:    none
   CORS:      none
   ACL:       tst: FULL_CONTROL
client $ s3cmd info s3://bucket/2019-10-15-090436_1254x522_scrubbed.png
s3://bucket/2019-10-15-090436_1254x522_scrubbed.png (object):
   File size: 256
   Last mod:  Tue, 10 Dec 2019 10:33:24 GMT
   MIME type: binary/octet-stream
   Storage:   STANDARD
   MD5 sum:   39cef8e6027e9d69a025175c9ea076a0
   SSE:       none
   Policy:    none
   CORS:      none
   ACL:       tst: FULL_CONTROL
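Of the three objects, only darthvader.png reports SPINNING_RUST; the other two are STANDARD. With s3cmd the storage class is chosen at upload time, so darthvader.png would have been uploaded with something along these lines (assuming an s3cmd version recent enough to support --storage-class):

s3cmd put darthvader.png s3://bucket/ --storage-class=SPINNING_RUST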