RHCS on All Flash Cluster : Performance Blog Series : ceph.conf template file
### PLEASE TAKE INTO ACCOUNT: THIS FILE IS CONFIGURED FOR BENCHMARK TESTING OF CEPH, NOT FOR PRODUCTION USE
[client]
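# librbd client-side caching is disabled so benchmark results reflect raw cluster performance rather than cache hits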
rbd cache = False
[client.openstack]
admin socket = /var/run/openstack/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/ceph/qemu-guest-$pid.log
# Please do not change this file directly since it is managed by Ansible and will be overwritten
[global]
# let's force the admin socket back to its previous form so we can properly check for existing instances
# also the $cluster-$name.$pid.$cctid.asok form is only needed when running multiple instances
# of the same daemon, something ceph-ansible cannot do at the time of writing
admin socket = $run_dir/$cluster-$name.asok
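# cephx authentication and message signing are disabled entirely to remove auth overhead from the benchmark; never run a production cluster this way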
auth client required = none
auth cluster required = none
auth service required = none
auth supported = none
cephx require signatures = False
cephx sign messages = False
cluster network = 1.1.1.0/24
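# every debug subsystem is forced to 0/0 (log level / in-memory gather level) to eliminate logging overhead during the runs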
debug asok = 0/0
debug auth = 0/0
debug bdev = 0/0
debug bluefs = 0/0
debug bluestore = 0/0
debug buffer = 0/0
debug civetweb = 0/0
debug client = 0/0
debug compressor = 0/0
debug context = 0/0
debug crush = 0/0
debug crypto = 0/0
debug dpdk = 0/0
debug eventtrace = 0/0
debug filer = 0/0
debug filestore = 0/0
debug finisher = 0/0
debug fuse = 0/0
debug heartbeatmap = 0/0
debug javaclient = 0/0
debug journal = 0/0
debug journaler = 0/0
debug kinetic = 0/0
debug kstore = 0/0
debug leveldb = 0/0
debug lockdep = 0/0
debug mds = 0/0
debug mds balancer = 0/0
debug mds locker = 0/0
debug mds log = 0/0
debug mds log expire = 0/0
debug mds migrator = 0/0
debug memdb = 0/0
debug mgr = 0/0
debug mgrc = 0/0
debug mon = 0/0
debug monc = 0/0
debug ms = 0/0
debug none = 0/0
debug objclass = 0/0
debug objectcacher = 0/0
debug objecter = 0/0
debug optracker = 0/0
debug osd = 0/0
debug paxos = 0/0
debug perfcounter = 0/0
debug rados = 0/0
debug rbd = 0/0
debug rbd mirror = 0/0
debug rbd replay = 0/0
debug refs = 0/0
debug reserver = 0/0
debug rgw = 0/0
debug rocksdb = 0/0
debug striper = 0/0
debug throttle = 0/0
debug timer = 0/0
debug tp = 0/0
debug xio = 0/0
fsid = 66f14b44-1e42-4869-9ebb-cbe0b0c4053d
log file = /var/log/ceph/$cluster-$type-$id.log
max open files = 131072
mon compact on trim = False
mon host = 10.48.XX.14,10.48.XX.32,10.48.XX.16
mon initial members = ceph04,ceph05,ceph06
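# the deep scrub interval here, plus the scrub load threshold and min/max intervals below, are set so scrubbing effectively never triggers during a benchmark window (2^37 seconds is thousands of years)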
osd deep scrub interval = 137438953472
osd max scrubs = 16
osd objectstore = bluestore
osd op threads = 2
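# pools default to 2 replicas and stay writable with 1, roughly halving write amplification versus the usual production size=3/min_size=2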
osd pool default min size = 1
osd pool default size = 2
osd scrub load threshold = 0.01
osd scrub max interval = 137438953472
osd scrub min interval = 137438953472
perf = True
public network = 10.48.22.0/24
rbd readahead disable after bytes = 0
rbd readahead max bytes = 4194304
rocksdb perf = True
throttler perf counter = False
[mon]
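# pool deletion is allowed so benchmark pools can be torn down and recreated quickly; a down OSD is only marked out after 300 s (5 minutes)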
mon allow pool delete = True
mon health preluminous compat = True
mon osd down out interval = 300
[osd]
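# cache autotuning is off in favour of a fixed 8 GiB BlueStore cache per OSD, split 80% metadata / 20% RocksDB key-value data, presumably sized for the all-flash nodes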
bluestore cache autotune = 0
bluestore cache kv ratio = 0.2
bluestore cache meta ratio = 0.8
bluestore cache size ssd = 8G
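# data checksumming is turned off to save CPU cycles, trading away BlueStore's end-to-end integrity checking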
bluestore csum type = none
bluestore extent map shard max size = 200
bluestore extent map shard min size = 50
bluestore extent map shard target size = 100
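# RocksDB is tuned for flash: compression disabled, 64 MiB write buffers (up to 32 of them), and extra compaction/flusher threads with larger level sizes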
bluestore rocksdb options = compression=kNoCompression,max_write_buffer_number=32,min_write_buffer_number_to_merge=2,recycle_log_file_num=32,compaction_style=kCompactionStyleLevel,write_buffer_size=67108864,target_file_size_base=67108864,max_background_compactions=31,level0_file_num_compaction_trigger=8,level0_slowdown_writes_trigger=32,level0_stop_writes_trigger=64,max_bytes_for_level_base=536870912,compaction_threads=32,max_bytes_for_level_multiplier=8,flusher_threads=8,compaction_readahead_size=2MB
osd map share max epochs = 100
osd max backfills = 5
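# each OSD targets 4 GiB of memory (4294967296 bytes) and runs 8 op shards with 2 threads per shard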
osd memory target = 4294967296
osd op num shards = 8
osd op num threads per shard = 2
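# the PG log is capped at 10 entries to cut per-op metadata overhead; the trade-off is that recovery falls back to backfill more often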
osd min pg log entries = 10
osd max pg log entries = 10
osd pg log dups tracked = 10
osd pg log trim min = 10