Skip to content

Instantly share code, notes, and snippets.

@leseb
Created May 18, 2020 12:42
Show Gist options
  • Save leseb/6d3f92ed69e5fe1053894dacc93263b7 to your computer and use it in GitHub Desktop.
# HELP ceph_health_status Cluster health status
# TYPE ceph_health_status untyped
ceph_health_status 1.0
# HELP ceph_mon_quorum_status Monitors in quorum
# TYPE ceph_mon_quorum_status gauge
ceph_mon_quorum_status{ceph_daemon="mon.a"} 1.0
# HELP ceph_fs_metadata FS Metadata
# TYPE ceph_fs_metadata untyped
# HELP ceph_mds_metadata MDS Metadata
# TYPE ceph_mds_metadata untyped
# HELP ceph_mon_metadata MON Metadata
# TYPE ceph_mon_metadata untyped
ceph_mon_metadata{ceph_daemon="mon.a",hostname="minikube",public_addr="10.106.31.93",rank="0",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
# HELP ceph_mgr_metadata MGR metadata
# TYPE ceph_mgr_metadata gauge
ceph_mgr_metadata{ceph_daemon="mgr.a",hostname="minikube",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
# HELP ceph_mgr_status MGR status (0=standby, 1=active)
# TYPE ceph_mgr_status gauge
ceph_mgr_status{ceph_daemon="mgr.a"} 1.0
# HELP ceph_mgr_module_status MGR module status (0=disabled, 1=enabled, 2=auto-enabled)
# TYPE ceph_mgr_module_status gauge
ceph_mgr_module_status{name="alerts"} 0.0
ceph_mgr_module_status{name="balancer"} 2.0
ceph_mgr_module_status{name="cephadm"} 0.0
ceph_mgr_module_status{name="crash"} 2.0
ceph_mgr_module_status{name="dashboard"} 1.0
ceph_mgr_module_status{name="devicehealth"} 2.0
ceph_mgr_module_status{name="diskprediction_local"} 0.0
ceph_mgr_module_status{name="influx"} 0.0
ceph_mgr_module_status{name="insights"} 0.0
ceph_mgr_module_status{name="iostat"} 1.0
ceph_mgr_module_status{name="k8sevents"} 0.0
ceph_mgr_module_status{name="localpool"} 0.0
ceph_mgr_module_status{name="orchestrator"} 2.0
ceph_mgr_module_status{name="osd_support"} 2.0
ceph_mgr_module_status{name="pg_autoscaler"} 2.0
ceph_mgr_module_status{name="progress"} 2.0
ceph_mgr_module_status{name="prometheus"} 1.0
ceph_mgr_module_status{name="rbd_support"} 2.0
ceph_mgr_module_status{name="restful"} 1.0
ceph_mgr_module_status{name="rook"} 1.0
ceph_mgr_module_status{name="selftest"} 0.0
ceph_mgr_module_status{name="status"} 2.0
ceph_mgr_module_status{name="telegraf"} 0.0
ceph_mgr_module_status{name="telemetry"} 2.0
ceph_mgr_module_status{name="test_orchestrator"} 0.0
ceph_mgr_module_status{name="volumes"} 2.0
ceph_mgr_module_status{name="zabbix"} 0.0
# HELP ceph_mgr_module_can_run MGR module runnable state i.e. can it run (0=no, 1=yes)
# TYPE ceph_mgr_module_can_run gauge
ceph_mgr_module_can_run{name="alerts"} 1.0
ceph_mgr_module_can_run{name="balancer"} 1.0
ceph_mgr_module_can_run{name="cephadm"} 1.0
ceph_mgr_module_can_run{name="crash"} 1.0
ceph_mgr_module_can_run{name="dashboard"} 1.0
ceph_mgr_module_can_run{name="devicehealth"} 1.0
ceph_mgr_module_can_run{name="diskprediction_local"} 1.0
ceph_mgr_module_can_run{name="influx"} 0.0
ceph_mgr_module_can_run{name="insights"} 1.0
ceph_mgr_module_can_run{name="iostat"} 1.0
ceph_mgr_module_can_run{name="k8sevents"} 1.0
ceph_mgr_module_can_run{name="localpool"} 1.0
ceph_mgr_module_can_run{name="orchestrator"} 1.0
ceph_mgr_module_can_run{name="osd_support"} 1.0
ceph_mgr_module_can_run{name="pg_autoscaler"} 1.0
ceph_mgr_module_can_run{name="progress"} 1.0
ceph_mgr_module_can_run{name="prometheus"} 1.0
ceph_mgr_module_can_run{name="rbd_support"} 1.0
ceph_mgr_module_can_run{name="restful"} 1.0
ceph_mgr_module_can_run{name="rook"} 1.0
ceph_mgr_module_can_run{name="selftest"} 1.0
ceph_mgr_module_can_run{name="status"} 1.0
ceph_mgr_module_can_run{name="telegraf"} 1.0
ceph_mgr_module_can_run{name="telemetry"} 1.0
ceph_mgr_module_can_run{name="test_orchestrator"} 1.0
ceph_mgr_module_can_run{name="volumes"} 1.0
ceph_mgr_module_can_run{name="zabbix"} 1.0
# HELP ceph_osd_metadata OSD Metadata
# TYPE ceph_osd_metadata untyped
ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0",cluster_addr="172.17.0.11",device_class="hdd",front_iface="eth0",hostname="minikube",objectstore="bluestore",public_addr="172.17.0.11",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1",cluster_addr="172.17.0.9",device_class="hdd",front_iface="eth0",hostname="minikube",objectstore="bluestore",public_addr="172.17.0.9",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2",cluster_addr="172.17.0.16",device_class="hdd",front_iface="eth0",hostname="minikube",objectstore="bluestore",public_addr="172.17.0.16",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
# HELP ceph_disk_occupation Associate Ceph daemon with disk used
# TYPE ceph_disk_occupation untyped
ceph_disk_occupation{ceph_daemon="osd.0",device="/dev/dm-0",db_device="",wal_device="",instance="minikube"} 1.0
ceph_disk_occupation{ceph_daemon="osd.1",device="/dev/dm-1",db_device="",wal_device="",instance="minikube"} 1.0
ceph_disk_occupation{ceph_daemon="osd.2",device="/dev/dm-2",db_device="",wal_device="",instance="minikube"} 1.0
# HELP ceph_pool_metadata POOL Metadata
# TYPE ceph_pool_metadata untyped
ceph_pool_metadata{pool_id="1",name="device_health_metrics"} 1.0
ceph_pool_metadata{pool_id="18",name="my-store.rgw.control"} 1.0
ceph_pool_metadata{pool_id="19",name="my-store.rgw.meta"} 1.0
ceph_pool_metadata{pool_id="20",name="my-store.rgw.log"} 1.0
ceph_pool_metadata{pool_id="21",name="my-store.rgw.buckets.index"} 1.0
ceph_pool_metadata{pool_id="22",name="my-store.rgw.buckets.non-ec"} 1.0
ceph_pool_metadata{pool_id="23",name=".rgw.root"} 1.0
ceph_pool_metadata{pool_id="24",name="my-store.rgw.buckets.data"} 1.0
# HELP ceph_rgw_metadata RGW Metadata
# TYPE ceph_rgw_metadata untyped
ceph_rgw_metadata{ceph_daemon="rgw.my.store.a",hostname="minikube",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
ceph_rgw_metadata{ceph_daemon="rgw.my.store.b",hostname="minikube",ceph_version="ceph version 15.2.1 (9fd2f65f91d9246fae2c841a6222d34d121680ee) octopus (stable)"} 1.0
# HELP ceph_rbd_mirror_metadata RBD Mirror Metadata
# TYPE ceph_rbd_mirror_metadata untyped
# HELP ceph_pg_total PG Total Count per Pool
# TYPE ceph_pg_total gauge
ceph_pg_total{pool_id="1"} 1.0
ceph_pg_total{pool_id="18"} 8.0
ceph_pg_total{pool_id="19"} 8.0
ceph_pg_total{pool_id="20"} 8.0
ceph_pg_total{pool_id="21"} 8.0
ceph_pg_total{pool_id="22"} 8.0
ceph_pg_total{pool_id="23"} 8.0
ceph_pg_total{pool_id="24"} 32.0
# HELP ceph_osd_flag_noup OSD Flag noup
# TYPE ceph_osd_flag_noup untyped
ceph_osd_flag_noup 0.0
# HELP ceph_osd_flag_nodown OSD Flag nodown
# TYPE ceph_osd_flag_nodown untyped
ceph_osd_flag_nodown 0.0
# HELP ceph_osd_flag_noout OSD Flag noout
# TYPE ceph_osd_flag_noout untyped
ceph_osd_flag_noout 0.0
# HELP ceph_osd_flag_noin OSD Flag noin
# TYPE ceph_osd_flag_noin untyped
ceph_osd_flag_noin 0.0
# HELP ceph_osd_flag_nobackfill OSD Flag nobackfill
# TYPE ceph_osd_flag_nobackfill untyped
ceph_osd_flag_nobackfill 0.0
# HELP ceph_osd_flag_norebalance OSD Flag norebalance
# TYPE ceph_osd_flag_norebalance untyped
ceph_osd_flag_norebalance 0.0
# HELP ceph_osd_flag_norecover OSD Flag norecover
# TYPE ceph_osd_flag_norecover untyped
ceph_osd_flag_norecover 0.0
# HELP ceph_osd_flag_noscrub OSD Flag noscrub
# TYPE ceph_osd_flag_noscrub untyped
ceph_osd_flag_noscrub 0.0
# HELP ceph_osd_flag_nodeep_scrub OSD Flag nodeep-scrub
# TYPE ceph_osd_flag_nodeep_scrub untyped
ceph_osd_flag_nodeep_scrub 0.0
# HELP ceph_osd_weight OSD status weight
# TYPE ceph_osd_weight untyped
ceph_osd_weight{ceph_daemon="osd.0"} 1.0
ceph_osd_weight{ceph_daemon="osd.1"} 1.0
ceph_osd_weight{ceph_daemon="osd.2"} 1.0
# HELP ceph_osd_up OSD status up
# TYPE ceph_osd_up untyped
ceph_osd_up{ceph_daemon="osd.0"} 1.0
ceph_osd_up{ceph_daemon="osd.1"} 1.0
ceph_osd_up{ceph_daemon="osd.2"} 1.0
# HELP ceph_osd_in OSD status in
# TYPE ceph_osd_in untyped
ceph_osd_in{ceph_daemon="osd.0"} 1.0
ceph_osd_in{ceph_daemon="osd.1"} 1.0
ceph_osd_in{ceph_daemon="osd.2"} 1.0
# HELP ceph_osd_apply_latency_ms OSD stat apply_latency_ms
# TYPE ceph_osd_apply_latency_ms gauge
ceph_osd_apply_latency_ms{ceph_daemon="osd.2"} 0.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.0"} 0.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.1"} 0.0
# HELP ceph_osd_commit_latency_ms OSD stat commit_latency_ms
# TYPE ceph_osd_commit_latency_ms gauge
ceph_osd_commit_latency_ms{ceph_daemon="osd.2"} 0.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.0"} 0.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.1"} 0.0
# HELP ceph_pool_recovering_objects_per_sec OSD pool stats: recovering_objects_per_sec
# TYPE ceph_pool_recovering_objects_per_sec gauge
ceph_pool_recovering_objects_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="18"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="19"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="20"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="21"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="22"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="23"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="24"} 0.0
# HELP ceph_pool_recovering_bytes_per_sec OSD pool stats: recovering_bytes_per_sec
# TYPE ceph_pool_recovering_bytes_per_sec gauge
ceph_pool_recovering_bytes_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="18"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="19"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="20"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="21"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="22"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="23"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="24"} 0.0
# HELP ceph_pool_recovering_keys_per_sec OSD pool stats: recovering_keys_per_sec
# TYPE ceph_pool_recovering_keys_per_sec gauge
ceph_pool_recovering_keys_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="18"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="19"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="20"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="21"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="22"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="23"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="24"} 0.0
# HELP ceph_pool_num_objects_recovered OSD pool stats: num_objects_recovered
# TYPE ceph_pool_num_objects_recovered gauge
ceph_pool_num_objects_recovered{pool_id="1"} 0.0
ceph_pool_num_objects_recovered{pool_id="18"} 0.0
ceph_pool_num_objects_recovered{pool_id="19"} 0.0
ceph_pool_num_objects_recovered{pool_id="20"} 0.0
ceph_pool_num_objects_recovered{pool_id="21"} 0.0
ceph_pool_num_objects_recovered{pool_id="22"} 0.0
ceph_pool_num_objects_recovered{pool_id="23"} 0.0
ceph_pool_num_objects_recovered{pool_id="24"} 0.0
# HELP ceph_pool_num_bytes_recovered OSD pool stats: num_bytes_recovered
# TYPE ceph_pool_num_bytes_recovered gauge
ceph_pool_num_bytes_recovered{pool_id="1"} 0.0
ceph_pool_num_bytes_recovered{pool_id="18"} 0.0
ceph_pool_num_bytes_recovered{pool_id="19"} 0.0
ceph_pool_num_bytes_recovered{pool_id="20"} 0.0
ceph_pool_num_bytes_recovered{pool_id="21"} 0.0
ceph_pool_num_bytes_recovered{pool_id="22"} 0.0
ceph_pool_num_bytes_recovered{pool_id="23"} 0.0
ceph_pool_num_bytes_recovered{pool_id="24"} 0.0
# HELP ceph_pg_active PG active per pool
# TYPE ceph_pg_active gauge
ceph_pg_active{pool_id="1"} 0.0
ceph_pg_active{pool_id="18"} 8.0
ceph_pg_active{pool_id="19"} 8.0
ceph_pg_active{pool_id="20"} 8.0
ceph_pg_active{pool_id="21"} 8.0
ceph_pg_active{pool_id="22"} 8.0
ceph_pg_active{pool_id="23"} 8.0
ceph_pg_active{pool_id="24"} 32.0
# HELP ceph_pg_clean PG clean per pool
# TYPE ceph_pg_clean gauge
ceph_pg_clean{pool_id="1"} 0.0
ceph_pg_clean{pool_id="18"} 8.0
ceph_pg_clean{pool_id="19"} 8.0
ceph_pg_clean{pool_id="20"} 8.0
ceph_pg_clean{pool_id="21"} 8.0
ceph_pg_clean{pool_id="22"} 8.0
ceph_pg_clean{pool_id="23"} 8.0
ceph_pg_clean{pool_id="24"} 32.0
# HELP ceph_pg_down PG down per pool
# TYPE ceph_pg_down gauge
ceph_pg_down{pool_id="1"} 0.0
ceph_pg_down{pool_id="18"} 0.0
ceph_pg_down{pool_id="19"} 0.0
ceph_pg_down{pool_id="20"} 0.0
ceph_pg_down{pool_id="21"} 0.0
ceph_pg_down{pool_id="22"} 0.0
ceph_pg_down{pool_id="23"} 0.0
ceph_pg_down{pool_id="24"} 0.0
# HELP ceph_pg_recovery_unfound PG recovery_unfound per pool
# TYPE ceph_pg_recovery_unfound gauge
ceph_pg_recovery_unfound{pool_id="1"} 0.0
ceph_pg_recovery_unfound{pool_id="18"} 0.0
ceph_pg_recovery_unfound{pool_id="19"} 0.0
ceph_pg_recovery_unfound{pool_id="20"} 0.0
ceph_pg_recovery_unfound{pool_id="21"} 0.0
ceph_pg_recovery_unfound{pool_id="22"} 0.0
ceph_pg_recovery_unfound{pool_id="23"} 0.0
ceph_pg_recovery_unfound{pool_id="24"} 0.0
# HELP ceph_pg_backfill_unfound PG backfill_unfound per pool
# TYPE ceph_pg_backfill_unfound gauge
ceph_pg_backfill_unfound{pool_id="1"} 0.0
ceph_pg_backfill_unfound{pool_id="18"} 0.0
ceph_pg_backfill_unfound{pool_id="19"} 0.0
ceph_pg_backfill_unfound{pool_id="20"} 0.0
ceph_pg_backfill_unfound{pool_id="21"} 0.0
ceph_pg_backfill_unfound{pool_id="22"} 0.0
ceph_pg_backfill_unfound{pool_id="23"} 0.0
ceph_pg_backfill_unfound{pool_id="24"} 0.0
# HELP ceph_pg_scrubbing PG scrubbing per pool
# TYPE ceph_pg_scrubbing gauge
ceph_pg_scrubbing{pool_id="1"} 0.0
ceph_pg_scrubbing{pool_id="18"} 0.0
ceph_pg_scrubbing{pool_id="19"} 0.0
ceph_pg_scrubbing{pool_id="20"} 0.0
ceph_pg_scrubbing{pool_id="21"} 0.0
ceph_pg_scrubbing{pool_id="22"} 0.0
ceph_pg_scrubbing{pool_id="23"} 0.0
ceph_pg_scrubbing{pool_id="24"} 0.0
# HELP ceph_pg_degraded PG degraded per pool
# TYPE ceph_pg_degraded gauge
ceph_pg_degraded{pool_id="1"} 0.0
ceph_pg_degraded{pool_id="18"} 0.0
ceph_pg_degraded{pool_id="19"} 0.0
ceph_pg_degraded{pool_id="20"} 0.0
ceph_pg_degraded{pool_id="21"} 0.0
ceph_pg_degraded{pool_id="22"} 0.0
ceph_pg_degraded{pool_id="23"} 0.0
ceph_pg_degraded{pool_id="24"} 0.0
# HELP ceph_pg_inconsistent PG inconsistent per pool
# TYPE ceph_pg_inconsistent gauge
ceph_pg_inconsistent{pool_id="1"} 0.0
ceph_pg_inconsistent{pool_id="18"} 0.0
ceph_pg_inconsistent{pool_id="19"} 0.0
ceph_pg_inconsistent{pool_id="20"} 0.0
ceph_pg_inconsistent{pool_id="21"} 0.0
ceph_pg_inconsistent{pool_id="22"} 0.0
ceph_pg_inconsistent{pool_id="23"} 0.0
ceph_pg_inconsistent{pool_id="24"} 0.0
# HELP ceph_pg_peering PG peering per pool
# TYPE ceph_pg_peering gauge
ceph_pg_peering{pool_id="1"} 0.0
ceph_pg_peering{pool_id="18"} 0.0
ceph_pg_peering{pool_id="19"} 0.0
ceph_pg_peering{pool_id="20"} 0.0
ceph_pg_peering{pool_id="21"} 0.0
ceph_pg_peering{pool_id="22"} 0.0
ceph_pg_peering{pool_id="23"} 0.0
ceph_pg_peering{pool_id="24"} 0.0
# HELP ceph_pg_repair PG repair per pool
# TYPE ceph_pg_repair gauge
ceph_pg_repair{pool_id="1"} 0.0
ceph_pg_repair{pool_id="18"} 0.0
ceph_pg_repair{pool_id="19"} 0.0
ceph_pg_repair{pool_id="20"} 0.0
ceph_pg_repair{pool_id="21"} 0.0
ceph_pg_repair{pool_id="22"} 0.0
ceph_pg_repair{pool_id="23"} 0.0
ceph_pg_repair{pool_id="24"} 0.0
# HELP ceph_pg_recovering PG recovering per pool
# TYPE ceph_pg_recovering gauge
ceph_pg_recovering{pool_id="1"} 0.0
ceph_pg_recovering{pool_id="18"} 0.0
ceph_pg_recovering{pool_id="19"} 0.0
ceph_pg_recovering{pool_id="20"} 0.0
ceph_pg_recovering{pool_id="21"} 0.0
ceph_pg_recovering{pool_id="22"} 0.0
ceph_pg_recovering{pool_id="23"} 0.0
ceph_pg_recovering{pool_id="24"} 0.0
# HELP ceph_pg_forced_recovery PG forced_recovery per pool
# TYPE ceph_pg_forced_recovery gauge
ceph_pg_forced_recovery{pool_id="1"} 0.0
ceph_pg_forced_recovery{pool_id="18"} 0.0
ceph_pg_forced_recovery{pool_id="19"} 0.0
ceph_pg_forced_recovery{pool_id="20"} 0.0
ceph_pg_forced_recovery{pool_id="21"} 0.0
ceph_pg_forced_recovery{pool_id="22"} 0.0
ceph_pg_forced_recovery{pool_id="23"} 0.0
ceph_pg_forced_recovery{pool_id="24"} 0.0
# HELP ceph_pg_backfill_wait PG backfill_wait per pool
# TYPE ceph_pg_backfill_wait gauge
ceph_pg_backfill_wait{pool_id="1"} 0.0
ceph_pg_backfill_wait{pool_id="18"} 0.0
ceph_pg_backfill_wait{pool_id="19"} 0.0
ceph_pg_backfill_wait{pool_id="20"} 0.0
ceph_pg_backfill_wait{pool_id="21"} 0.0
ceph_pg_backfill_wait{pool_id="22"} 0.0
ceph_pg_backfill_wait{pool_id="23"} 0.0
ceph_pg_backfill_wait{pool_id="24"} 0.0
# HELP ceph_pg_incomplete PG incomplete per pool
# TYPE ceph_pg_incomplete gauge
ceph_pg_incomplete{pool_id="1"} 0.0
ceph_pg_incomplete{pool_id="18"} 0.0
ceph_pg_incomplete{pool_id="19"} 0.0
ceph_pg_incomplete{pool_id="20"} 0.0
ceph_pg_incomplete{pool_id="21"} 0.0
ceph_pg_incomplete{pool_id="22"} 0.0
ceph_pg_incomplete{pool_id="23"} 0.0
ceph_pg_incomplete{pool_id="24"} 0.0
# HELP ceph_pg_stale PG stale per pool
# TYPE ceph_pg_stale gauge
ceph_pg_stale{pool_id="1"} 0.0
ceph_pg_stale{pool_id="18"} 0.0
ceph_pg_stale{pool_id="19"} 0.0
ceph_pg_stale{pool_id="20"} 0.0
ceph_pg_stale{pool_id="21"} 0.0
ceph_pg_stale{pool_id="22"} 0.0
ceph_pg_stale{pool_id="23"} 0.0
ceph_pg_stale{pool_id="24"} 0.0
# HELP ceph_pg_remapped PG remapped per pool
# TYPE ceph_pg_remapped gauge
ceph_pg_remapped{pool_id="1"} 0.0
ceph_pg_remapped{pool_id="18"} 0.0
ceph_pg_remapped{pool_id="19"} 0.0
ceph_pg_remapped{pool_id="20"} 0.0
ceph_pg_remapped{pool_id="21"} 0.0
ceph_pg_remapped{pool_id="22"} 0.0
ceph_pg_remapped{pool_id="23"} 0.0
ceph_pg_remapped{pool_id="24"} 0.0
# HELP ceph_pg_deep PG deep per pool
# TYPE ceph_pg_deep gauge
ceph_pg_deep{pool_id="1"} 0.0
ceph_pg_deep{pool_id="18"} 0.0
ceph_pg_deep{pool_id="19"} 0.0
ceph_pg_deep{pool_id="20"} 0.0
ceph_pg_deep{pool_id="21"} 0.0
ceph_pg_deep{pool_id="22"} 0.0
ceph_pg_deep{pool_id="23"} 0.0
ceph_pg_deep{pool_id="24"} 0.0
# HELP ceph_pg_backfilling PG backfilling per pool
# TYPE ceph_pg_backfilling gauge
ceph_pg_backfilling{pool_id="1"} 0.0
ceph_pg_backfilling{pool_id="18"} 0.0
ceph_pg_backfilling{pool_id="19"} 0.0
ceph_pg_backfilling{pool_id="20"} 0.0
ceph_pg_backfilling{pool_id="21"} 0.0
ceph_pg_backfilling{pool_id="22"} 0.0
ceph_pg_backfilling{pool_id="23"} 0.0
ceph_pg_backfilling{pool_id="24"} 0.0
# HELP ceph_pg_forced_backfill PG forced_backfill per pool
# TYPE ceph_pg_forced_backfill gauge
ceph_pg_forced_backfill{pool_id="1"} 0.0
ceph_pg_forced_backfill{pool_id="18"} 0.0
ceph_pg_forced_backfill{pool_id="19"} 0.0
ceph_pg_forced_backfill{pool_id="20"} 0.0
ceph_pg_forced_backfill{pool_id="21"} 0.0
ceph_pg_forced_backfill{pool_id="22"} 0.0
ceph_pg_forced_backfill{pool_id="23"} 0.0
ceph_pg_forced_backfill{pool_id="24"} 0.0
# HELP ceph_pg_backfill_toofull PG backfill_toofull per pool
# TYPE ceph_pg_backfill_toofull gauge
ceph_pg_backfill_toofull{pool_id="1"} 0.0
ceph_pg_backfill_toofull{pool_id="18"} 0.0
ceph_pg_backfill_toofull{pool_id="19"} 0.0
ceph_pg_backfill_toofull{pool_id="20"} 0.0
ceph_pg_backfill_toofull{pool_id="21"} 0.0
ceph_pg_backfill_toofull{pool_id="22"} 0.0
ceph_pg_backfill_toofull{pool_id="23"} 0.0
ceph_pg_backfill_toofull{pool_id="24"} 0.0
# HELP ceph_pg_recovery_wait PG recovery_wait per pool
# TYPE ceph_pg_recovery_wait gauge
ceph_pg_recovery_wait{pool_id="1"} 0.0
ceph_pg_recovery_wait{pool_id="18"} 0.0
ceph_pg_recovery_wait{pool_id="19"} 0.0
ceph_pg_recovery_wait{pool_id="20"} 0.0
ceph_pg_recovery_wait{pool_id="21"} 0.0
ceph_pg_recovery_wait{pool_id="22"} 0.0
ceph_pg_recovery_wait{pool_id="23"} 0.0
ceph_pg_recovery_wait{pool_id="24"} 0.0
# HELP ceph_pg_recovery_toofull PG recovery_toofull per pool
# TYPE ceph_pg_recovery_toofull gauge
ceph_pg_recovery_toofull{pool_id="1"} 0.0
ceph_pg_recovery_toofull{pool_id="18"} 0.0
ceph_pg_recovery_toofull{pool_id="19"} 0.0
ceph_pg_recovery_toofull{pool_id="20"} 0.0
ceph_pg_recovery_toofull{pool_id="21"} 0.0
ceph_pg_recovery_toofull{pool_id="22"} 0.0
ceph_pg_recovery_toofull{pool_id="23"} 0.0
ceph_pg_recovery_toofull{pool_id="24"} 0.0
# HELP ceph_pg_undersized PG undersized per pool
# TYPE ceph_pg_undersized gauge
ceph_pg_undersized{pool_id="1"} 1.0
ceph_pg_undersized{pool_id="18"} 0.0
ceph_pg_undersized{pool_id="19"} 0.0
ceph_pg_undersized{pool_id="20"} 0.0
ceph_pg_undersized{pool_id="21"} 0.0
ceph_pg_undersized{pool_id="22"} 0.0
ceph_pg_undersized{pool_id="23"} 0.0
ceph_pg_undersized{pool_id="24"} 0.0
# HELP ceph_pg_activating PG activating per pool
# TYPE ceph_pg_activating gauge
ceph_pg_activating{pool_id="1"} 0.0
ceph_pg_activating{pool_id="18"} 0.0
ceph_pg_activating{pool_id="19"} 0.0
ceph_pg_activating{pool_id="20"} 0.0
ceph_pg_activating{pool_id="21"} 0.0
ceph_pg_activating{pool_id="22"} 0.0
ceph_pg_activating{pool_id="23"} 0.0
ceph_pg_activating{pool_id="24"} 0.0
# HELP ceph_pg_peered PG peered per pool
# TYPE ceph_pg_peered gauge
ceph_pg_peered{pool_id="1"} 1.0
ceph_pg_peered{pool_id="18"} 0.0
ceph_pg_peered{pool_id="19"} 0.0
ceph_pg_peered{pool_id="20"} 0.0
ceph_pg_peered{pool_id="21"} 0.0
ceph_pg_peered{pool_id="22"} 0.0
ceph_pg_peered{pool_id="23"} 0.0
ceph_pg_peered{pool_id="24"} 0.0
# HELP ceph_pg_snaptrim PG snaptrim per pool
# TYPE ceph_pg_snaptrim gauge
ceph_pg_snaptrim{pool_id="1"} 0.0
ceph_pg_snaptrim{pool_id="18"} 0.0
ceph_pg_snaptrim{pool_id="19"} 0.0
ceph_pg_snaptrim{pool_id="20"} 0.0
ceph_pg_snaptrim{pool_id="21"} 0.0
ceph_pg_snaptrim{pool_id="22"} 0.0
ceph_pg_snaptrim{pool_id="23"} 0.0
ceph_pg_snaptrim{pool_id="24"} 0.0
# HELP ceph_pg_snaptrim_wait PG snaptrim_wait per pool
# TYPE ceph_pg_snaptrim_wait gauge
ceph_pg_snaptrim_wait{pool_id="1"} 0.0
ceph_pg_snaptrim_wait{pool_id="18"} 0.0
ceph_pg_snaptrim_wait{pool_id="19"} 0.0
ceph_pg_snaptrim_wait{pool_id="20"} 0.0
ceph_pg_snaptrim_wait{pool_id="21"} 0.0
ceph_pg_snaptrim_wait{pool_id="22"} 0.0
ceph_pg_snaptrim_wait{pool_id="23"} 0.0
ceph_pg_snaptrim_wait{pool_id="24"} 0.0
# HELP ceph_pg_snaptrim_error PG snaptrim_error per pool
# TYPE ceph_pg_snaptrim_error gauge
ceph_pg_snaptrim_error{pool_id="1"} 0.0
ceph_pg_snaptrim_error{pool_id="18"} 0.0
ceph_pg_snaptrim_error{pool_id="19"} 0.0
ceph_pg_snaptrim_error{pool_id="20"} 0.0
ceph_pg_snaptrim_error{pool_id="21"} 0.0
ceph_pg_snaptrim_error{pool_id="22"} 0.0
ceph_pg_snaptrim_error{pool_id="23"} 0.0
ceph_pg_snaptrim_error{pool_id="24"} 0.0
# HELP ceph_pg_creating PG creating per pool
# TYPE ceph_pg_creating gauge
ceph_pg_creating{pool_id="1"} 0.0
ceph_pg_creating{pool_id="18"} 0.0
ceph_pg_creating{pool_id="19"} 0.0
ceph_pg_creating{pool_id="20"} 0.0
ceph_pg_creating{pool_id="21"} 0.0
ceph_pg_creating{pool_id="22"} 0.0
ceph_pg_creating{pool_id="23"} 0.0
ceph_pg_creating{pool_id="24"} 0.0
# HELP ceph_pg_unknown PG unknown per pool
# TYPE ceph_pg_unknown gauge
ceph_pg_unknown{pool_id="1"} 0.0
ceph_pg_unknown{pool_id="18"} 0.0
ceph_pg_unknown{pool_id="19"} 0.0
ceph_pg_unknown{pool_id="20"} 0.0
ceph_pg_unknown{pool_id="21"} 0.0
ceph_pg_unknown{pool_id="22"} 0.0
ceph_pg_unknown{pool_id="23"} 0.0
ceph_pg_unknown{pool_id="24"} 0.0
# HELP ceph_cluster_total_bytes DF total_bytes
# TYPE ceph_cluster_total_bytes gauge
ceph_cluster_total_bytes 96624181248.0
# HELP ceph_cluster_total_used_bytes DF total_used_bytes
# TYPE ceph_cluster_total_used_bytes gauge
ceph_cluster_total_used_bytes 283508736.0
# HELP ceph_cluster_total_used_raw_bytes DF total_used_raw_bytes
# TYPE ceph_cluster_total_used_raw_bytes gauge
ceph_cluster_total_used_raw_bytes 3504734208.0
# HELP ceph_pool_max_avail DF pool max_avail
# TYPE ceph_pool_max_avail gauge
ceph_pool_max_avail{pool_id="1"} 29429016576.0
ceph_pool_max_avail{pool_id="18"} 88287051776.0
ceph_pool_max_avail{pool_id="19"} 88287051776.0
ceph_pool_max_avail{pool_id="20"} 88287051776.0
ceph_pool_max_avail{pool_id="21"} 88287051776.0
ceph_pool_max_avail{pool_id="22"} 88287051776.0
ceph_pool_max_avail{pool_id="23"} 88287051776.0
ceph_pool_max_avail{pool_id="24"} 88287051776.0
# HELP ceph_pool_stored DF pool stored
# TYPE ceph_pool_stored gauge
ceph_pool_stored{pool_id="1"} 0.0
ceph_pool_stored{pool_id="18"} 0.0
ceph_pool_stored{pool_id="19"} 1940.0
ceph_pool_stored{pool_id="20"} 3583.0
ceph_pool_stored{pool_id="21"} 990091.0
ceph_pool_stored{pool_id="22"} 0.0
ceph_pool_stored{pool_id="23"} 3832.0
ceph_pool_stored{pool_id="24"} 0.0
# HELP ceph_pool_stored_raw DF pool stored_raw
# TYPE ceph_pool_stored_raw gauge
ceph_pool_stored_raw{pool_id="1"} 0.0
ceph_pool_stored_raw{pool_id="18"} 0.0
ceph_pool_stored_raw{pool_id="19"} 1940.0
ceph_pool_stored_raw{pool_id="20"} 3583.0
ceph_pool_stored_raw{pool_id="21"} 990091.0
ceph_pool_stored_raw{pool_id="22"} 0.0
ceph_pool_stored_raw{pool_id="23"} 3832.0
ceph_pool_stored_raw{pool_id="24"} 0.0
# HELP ceph_pool_objects DF pool objects
# TYPE ceph_pool_objects gauge
ceph_pool_objects{pool_id="1"} 0.0
ceph_pool_objects{pool_id="18"} 8.0
ceph_pool_objects{pool_id="19"} 9.0
ceph_pool_objects{pool_id="20"} 212.0
ceph_pool_objects{pool_id="21"} 2.0
ceph_pool_objects{pool_id="22"} 0.0
ceph_pool_objects{pool_id="23"} 16.0
ceph_pool_objects{pool_id="24"} 0.0
# HELP ceph_pool_dirty DF pool dirty
# TYPE ceph_pool_dirty gauge
ceph_pool_dirty{pool_id="1"} 0.0
ceph_pool_dirty{pool_id="18"} 8.0
ceph_pool_dirty{pool_id="19"} 9.0
ceph_pool_dirty{pool_id="20"} 212.0
ceph_pool_dirty{pool_id="21"} 2.0
ceph_pool_dirty{pool_id="22"} 0.0
ceph_pool_dirty{pool_id="23"} 16.0
ceph_pool_dirty{pool_id="24"} 0.0
# HELP ceph_pool_quota_bytes DF pool quota_bytes
# TYPE ceph_pool_quota_bytes gauge
ceph_pool_quota_bytes{pool_id="1"} 0.0
ceph_pool_quota_bytes{pool_id="18"} 0.0
ceph_pool_quota_bytes{pool_id="19"} 0.0
ceph_pool_quota_bytes{pool_id="20"} 0.0
ceph_pool_quota_bytes{pool_id="21"} 0.0
ceph_pool_quota_bytes{pool_id="22"} 0.0
ceph_pool_quota_bytes{pool_id="23"} 0.0
ceph_pool_quota_bytes{pool_id="24"} 0.0
# HELP ceph_pool_quota_objects DF pool quota_objects
# TYPE ceph_pool_quota_objects gauge
ceph_pool_quota_objects{pool_id="1"} 0.0
ceph_pool_quota_objects{pool_id="18"} 0.0
ceph_pool_quota_objects{pool_id="19"} 0.0
ceph_pool_quota_objects{pool_id="20"} 0.0
ceph_pool_quota_objects{pool_id="21"} 0.0
ceph_pool_quota_objects{pool_id="22"} 0.0
ceph_pool_quota_objects{pool_id="23"} 0.0
ceph_pool_quota_objects{pool_id="24"} 0.0
# HELP ceph_pool_rd DF pool rd
# TYPE ceph_pool_rd gauge
ceph_pool_rd{pool_id="1"} 0.0
ceph_pool_rd{pool_id="18"} 0.0
ceph_pool_rd{pool_id="19"} 707.0
ceph_pool_rd{pool_id="20"} 530615.0
ceph_pool_rd{pool_id="21"} 3.0
ceph_pool_rd{pool_id="22"} 0.0
ceph_pool_rd{pool_id="23"} 142.0
ceph_pool_rd{pool_id="24"} 0.0
# HELP ceph_pool_rd_bytes DF pool rd_bytes
# TYPE ceph_pool_rd_bytes gauge
ceph_pool_rd_bytes{pool_id="1"} 0.0
ceph_pool_rd_bytes{pool_id="18"} 0.0
ceph_pool_rd_bytes{pool_id="19"} 618496.0
ceph_pool_rd_bytes{pool_id="20"} 543313920.0
ceph_pool_rd_bytes{pool_id="21"} 3072.0
ceph_pool_rd_bytes{pool_id="22"} 0.0
ceph_pool_rd_bytes{pool_id="23"} 145408.0
ceph_pool_rd_bytes{pool_id="24"} 0.0
# HELP ceph_pool_wr DF pool wr
# TYPE ceph_pool_wr gauge
ceph_pool_wr{pool_id="1"} 0.0
ceph_pool_wr{pool_id="18"} 0.0
ceph_pool_wr{pool_id="19"} 15.0
ceph_pool_wr{pool_id="20"} 353412.0
ceph_pool_wr{pool_id="21"} 0.0
ceph_pool_wr{pool_id="22"} 0.0
ceph_pool_wr{pool_id="23"} 0.0
ceph_pool_wr{pool_id="24"} 0.0
# HELP ceph_pool_wr_bytes DF pool wr_bytes
# TYPE ceph_pool_wr_bytes gauge
ceph_pool_wr_bytes{pool_id="1"} 0.0
ceph_pool_wr_bytes{pool_id="18"} 0.0
ceph_pool_wr_bytes{pool_id="19"} 3072.0
ceph_pool_wr_bytes{pool_id="20"} 57344.0
ceph_pool_wr_bytes{pool_id="21"} 0.0
ceph_pool_wr_bytes{pool_id="22"} 0.0
ceph_pool_wr_bytes{pool_id="23"} 0.0
ceph_pool_wr_bytes{pool_id="24"} 0.0
# HELP ceph_num_objects_degraded Number of degraded objects
# TYPE ceph_num_objects_degraded gauge
ceph_num_objects_degraded 0.0
# HELP ceph_num_objects_misplaced Number of misplaced objects
# TYPE ceph_num_objects_misplaced gauge
ceph_num_objects_misplaced 0.0
# HELP ceph_num_objects_unfound Number of unfound objects
# TYPE ceph_num_objects_unfound gauge
ceph_num_objects_unfound 0.0
# HELP ceph_mon_election_call Elections started
# TYPE ceph_mon_election_call counter
ceph_mon_election_call{ceph_daemon="mon.a"} 0.0
# HELP ceph_mon_election_lose Elections lost
# TYPE ceph_mon_election_lose counter
ceph_mon_election_lose{ceph_daemon="mon.a"} 0.0
# HELP ceph_mon_election_win Elections won
# TYPE ceph_mon_election_win counter
ceph_mon_election_win{ceph_daemon="mon.a"} 2.0
# HELP ceph_mon_num_elections Elections participated in
# TYPE ceph_mon_num_elections counter
ceph_mon_num_elections{ceph_daemon="mon.a"} 2.0
# HELP ceph_mon_num_sessions Open sessions
# TYPE ceph_mon_num_sessions gauge
ceph_mon_num_sessions{ceph_daemon="mon.a"} 14.0
# HELP ceph_mon_session_add Created sessions
# TYPE ceph_mon_session_add counter
ceph_mon_session_add{ceph_daemon="mon.a"} 144.0
# HELP ceph_mon_session_rm Removed sessions
# TYPE ceph_mon_session_rm counter
ceph_mon_session_rm{ceph_daemon="mon.a"} 170285.0
# HELP ceph_mon_session_trim Trimmed sessions
# TYPE ceph_mon_session_trim counter
ceph_mon_session_trim{ceph_daemon="mon.a"} 254.0
# HELP ceph_paxos_accept_timeout Accept timeouts
# TYPE ceph_paxos_accept_timeout counter
ceph_paxos_accept_timeout{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_begin Started and handled begins
# TYPE ceph_paxos_begin counter
ceph_paxos_begin{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_begin_bytes_sum Data in transaction on begin Total
# TYPE ceph_paxos_begin_bytes_sum counter
ceph_paxos_begin_bytes_sum{ceph_daemon="mon.a"} 31041467520.0
# HELP ceph_paxos_begin_bytes_count Data in transaction on begin Count
# TYPE ceph_paxos_begin_bytes_count counter
ceph_paxos_begin_bytes_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_begin_keys_sum Keys in transaction on begin Total
# TYPE ceph_paxos_begin_keys_sum counter
ceph_paxos_begin_keys_sum{ceph_daemon="mon.a"} 5933622.0
# HELP ceph_paxos_begin_keys_count Keys in transaction on begin Count
# TYPE ceph_paxos_begin_keys_count counter
ceph_paxos_begin_keys_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_begin_latency_sum Latency of begin operation Total
# TYPE ceph_paxos_begin_latency_sum counter
ceph_paxos_begin_latency_sum{ceph_daemon="mon.a"} 10428.619146308
# HELP ceph_paxos_begin_latency_count Latency of begin operation Count
# TYPE ceph_paxos_begin_latency_count counter
ceph_paxos_begin_latency_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_collect Peon collects
# TYPE ceph_paxos_collect counter
ceph_paxos_collect{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_bytes_sum Data in transaction on peon collect Total
# TYPE ceph_paxos_collect_bytes_sum counter
ceph_paxos_collect_bytes_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_bytes_count Data in transaction on peon collect Count
# TYPE ceph_paxos_collect_bytes_count counter
ceph_paxos_collect_bytes_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_keys_sum Keys in transaction on peon collect Total
# TYPE ceph_paxos_collect_keys_sum counter
ceph_paxos_collect_keys_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_keys_count Keys in transaction on peon collect Count
# TYPE ceph_paxos_collect_keys_count counter
ceph_paxos_collect_keys_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_latency_sum Peon collect latency Total
# TYPE ceph_paxos_collect_latency_sum counter
ceph_paxos_collect_latency_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_latency_count Peon collect latency Count
# TYPE ceph_paxos_collect_latency_count counter
ceph_paxos_collect_latency_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_timeout Collect timeouts
# TYPE ceph_paxos_collect_timeout counter
ceph_paxos_collect_timeout{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_collect_uncommitted Uncommitted values in started and handled collects
# TYPE ceph_paxos_collect_uncommitted counter
ceph_paxos_collect_uncommitted{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_commit Commits
# TYPE ceph_paxos_commit counter
ceph_paxos_commit{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_commit_bytes_sum Data in transaction on commit Total
# TYPE ceph_paxos_commit_bytes_sum counter
ceph_paxos_commit_bytes_sum{ceph_daemon="mon.a"} 30891342672.0
# HELP ceph_paxos_commit_bytes_count Data in transaction on commit Count
# TYPE ceph_paxos_commit_bytes_count counter
ceph_paxos_commit_bytes_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_commit_keys_sum Keys in transaction on commit Total
# TYPE ceph_paxos_commit_keys_sum counter
ceph_paxos_commit_keys_sum{ceph_daemon="mon.a"} 13900869.0
# HELP ceph_paxos_commit_keys_count Keys in transaction on commit Count
# TYPE ceph_paxos_commit_keys_count counter
ceph_paxos_commit_keys_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_commit_latency_sum Commit latency Total
# TYPE ceph_paxos_commit_latency_sum counter
ceph_paxos_commit_latency_sum{ceph_daemon="mon.a"} 9266.2174453
# HELP ceph_paxos_commit_latency_count Commit latency Count
# TYPE ceph_paxos_commit_latency_count counter
ceph_paxos_commit_latency_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_lease_ack_timeout Lease acknowledgement timeouts
# TYPE ceph_paxos_lease_ack_timeout counter
ceph_paxos_lease_ack_timeout{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_lease_timeout Lease timeouts
# TYPE ceph_paxos_lease_timeout counter
ceph_paxos_lease_timeout{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_new_pn New proposal number queries
# TYPE ceph_paxos_new_pn counter
ceph_paxos_new_pn{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_new_pn_latency_sum New proposal number getting latency Total
# TYPE ceph_paxos_new_pn_latency_sum counter
ceph_paxos_new_pn_latency_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_new_pn_latency_count New proposal number getting latency Count
# TYPE ceph_paxos_new_pn_latency_count counter
ceph_paxos_new_pn_latency_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_refresh Refreshes
# TYPE ceph_paxos_refresh counter
ceph_paxos_refresh{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_refresh_latency_sum Refresh latency Total
# TYPE ceph_paxos_refresh_latency_sum counter
ceph_paxos_refresh_latency_sum{ceph_daemon="mon.a"} 8147.336406793
# HELP ceph_paxos_refresh_latency_count Refresh latency Count
# TYPE ceph_paxos_refresh_latency_count counter
ceph_paxos_refresh_latency_count{ceph_daemon="mon.a"} 1977874.0
# HELP ceph_paxos_restart Restarts
# TYPE ceph_paxos_restart counter
ceph_paxos_restart{ceph_daemon="mon.a"} 4.0
# HELP ceph_paxos_share_state Sharings of state
# TYPE ceph_paxos_share_state counter
ceph_paxos_share_state{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_share_state_bytes_sum Data in shared state Total
# TYPE ceph_paxos_share_state_bytes_sum counter
ceph_paxos_share_state_bytes_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_share_state_bytes_count Data in shared state Count
# TYPE ceph_paxos_share_state_bytes_count counter
ceph_paxos_share_state_bytes_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_share_state_keys_sum Keys in shared state Total
# TYPE ceph_paxos_share_state_keys_sum counter
ceph_paxos_share_state_keys_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_share_state_keys_count Keys in shared state Count
# TYPE ceph_paxos_share_state_keys_count counter
ceph_paxos_share_state_keys_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_start_leader Starts in leader role
# TYPE ceph_paxos_start_leader counter
ceph_paxos_start_leader{ceph_daemon="mon.a"} 2.0
# HELP ceph_paxos_start_peon Starts in peon role
# TYPE ceph_paxos_start_peon counter
ceph_paxos_start_peon{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state Store a shared state on disk
# TYPE ceph_paxos_store_state counter
ceph_paxos_store_state{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_bytes_sum Data in transaction in stored state Total
# TYPE ceph_paxos_store_state_bytes_sum counter
ceph_paxos_store_state_bytes_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_bytes_count Data in transaction in stored state Count
# TYPE ceph_paxos_store_state_bytes_count counter
ceph_paxos_store_state_bytes_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_keys_sum Keys in transaction in stored state Total
# TYPE ceph_paxos_store_state_keys_sum counter
ceph_paxos_store_state_keys_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_keys_count Keys in transaction in stored state Count
# TYPE ceph_paxos_store_state_keys_count counter
ceph_paxos_store_state_keys_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_latency_sum Storing state latency Total
# TYPE ceph_paxos_store_state_latency_sum counter
ceph_paxos_store_state_latency_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_paxos_store_state_latency_count Storing state latency Count
# TYPE ceph_paxos_store_state_latency_count counter
ceph_paxos_store_state_latency_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache_cache_bytes current memory available for caches.
# TYPE ceph_prioritycache_cache_bytes gauge
ceph_prioritycache_cache_bytes{ceph_daemon="mon.a"} 1020054731.0
ceph_prioritycache_cache_bytes{ceph_daemon="osd.0"} 10094215658.0
ceph_prioritycache_cache_bytes{ceph_daemon="osd.1"} 10094215658.0
ceph_prioritycache_cache_bytes{ceph_daemon="osd.2"} 10094215658.0
# HELP ceph_prioritycache_heap_bytes aggregate bytes in use by the heap
# TYPE ceph_prioritycache_heap_bytes gauge
ceph_prioritycache_heap_bytes{ceph_daemon="mon.a"} 1170104320.0
ceph_prioritycache_heap_bytes{ceph_daemon="osd.0"} 236331008.0
ceph_prioritycache_heap_bytes{ceph_daemon="osd.1"} 274079744.0
ceph_prioritycache_heap_bytes{ceph_daemon="osd.2"} 428032000.0
# HELP ceph_prioritycache_mapped_bytes total bytes mapped by the process
# TYPE ceph_prioritycache_mapped_bytes gauge
ceph_prioritycache_mapped_bytes{ceph_daemon="mon.a"} 978681856.0
ceph_prioritycache_mapped_bytes{ceph_daemon="osd.0"} 234438656.0
ceph_prioritycache_mapped_bytes{ceph_daemon="osd.1"} 270647296.0
ceph_prioritycache_mapped_bytes{ceph_daemon="osd.2"} 151855104.0
# HELP ceph_prioritycache_target_bytes target process memory usage in bytes
# TYPE ceph_prioritycache_target_bytes gauge
ceph_prioritycache_target_bytes{ceph_daemon="mon.a"} 2147483648.0
ceph_prioritycache_target_bytes{ceph_daemon="osd.0"} 12822967091.0
ceph_prioritycache_target_bytes{ceph_daemon="osd.1"} 12822967091.0
ceph_prioritycache_target_bytes{ceph_daemon="osd.2"} 12822967091.0
# HELP ceph_prioritycache_unmapped_bytes unmapped bytes that the kernel has yet to reclaimed
# TYPE ceph_prioritycache_unmapped_bytes gauge
ceph_prioritycache_unmapped_bytes{ceph_daemon="mon.a"} 191422464.0
ceph_prioritycache_unmapped_bytes{ceph_daemon="osd.0"} 1892352.0
ceph_prioritycache_unmapped_bytes{ceph_daemon="osd.1"} 3432448.0
ceph_prioritycache_unmapped_bytes{ceph_daemon="osd.2"} 276176896.0
# HELP ceph_prioritycache:full_committed_bytes total bytes committed,
# TYPE ceph_prioritycache:full_committed_bytes gauge
ceph_prioritycache:full_committed_bytes{ceph_daemon="mon.a"} 75497472.0
# HELP ceph_prioritycache:full_pri0_bytes bytes allocated to pri0
# TYPE ceph_prioritycache:full_pri0_bytes gauge
ceph_prioritycache:full_pri0_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri10_bytes bytes allocated to pri10
# TYPE ceph_prioritycache:full_pri10_bytes gauge
ceph_prioritycache:full_pri10_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri11_bytes bytes allocated to pri11
# TYPE ceph_prioritycache:full_pri11_bytes gauge
ceph_prioritycache:full_pri11_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri1_bytes bytes allocated to pri1
# TYPE ceph_prioritycache:full_pri1_bytes gauge
ceph_prioritycache:full_pri1_bytes{ceph_daemon="mon.a"} 7807905.0
# HELP ceph_prioritycache:full_pri2_bytes bytes allocated to pri2
# TYPE ceph_prioritycache:full_pri2_bytes gauge
ceph_prioritycache:full_pri2_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri3_bytes bytes allocated to pri3
# TYPE ceph_prioritycache:full_pri3_bytes gauge
ceph_prioritycache:full_pri3_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri4_bytes bytes allocated to pri4
# TYPE ceph_prioritycache:full_pri4_bytes gauge
ceph_prioritycache:full_pri4_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri5_bytes bytes allocated to pri5
# TYPE ceph_prioritycache:full_pri5_bytes gauge
ceph_prioritycache:full_pri5_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri6_bytes bytes allocated to pri6
# TYPE ceph_prioritycache:full_pri6_bytes gauge
ceph_prioritycache:full_pri6_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri7_bytes bytes allocated to pri7
# TYPE ceph_prioritycache:full_pri7_bytes gauge
ceph_prioritycache:full_pri7_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri8_bytes bytes allocated to pri8
# TYPE ceph_prioritycache:full_pri8_bytes gauge
ceph_prioritycache:full_pri8_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_pri9_bytes bytes allocated to pri9
# TYPE ceph_prioritycache:full_pri9_bytes gauge
ceph_prioritycache:full_pri9_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:full_reserved_bytes bytes reserved for future growth.
# TYPE ceph_prioritycache:full_reserved_bytes gauge
ceph_prioritycache:full_reserved_bytes{ceph_daemon="mon.a"} 67689567.0
# HELP ceph_prioritycache:inc_committed_bytes total bytes committed,
# TYPE ceph_prioritycache:inc_committed_bytes gauge
ceph_prioritycache:inc_committed_bytes{ceph_daemon="mon.a"} 71303168.0
# HELP ceph_prioritycache:inc_pri0_bytes bytes allocated to pri0
# TYPE ceph_prioritycache:inc_pri0_bytes gauge
ceph_prioritycache:inc_pri0_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri10_bytes bytes allocated to pri10
# TYPE ceph_prioritycache:inc_pri10_bytes gauge
ceph_prioritycache:inc_pri10_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri11_bytes bytes allocated to pri11
# TYPE ceph_prioritycache:inc_pri11_bytes gauge
ceph_prioritycache:inc_pri11_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri1_bytes bytes allocated to pri1
# TYPE ceph_prioritycache:inc_pri1_bytes gauge
ceph_prioritycache:inc_pri1_bytes{ceph_daemon="mon.a"} 616589.0
# HELP ceph_prioritycache:inc_pri2_bytes bytes allocated to pri2
# TYPE ceph_prioritycache:inc_pri2_bytes gauge
ceph_prioritycache:inc_pri2_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri3_bytes bytes allocated to pri3
# TYPE ceph_prioritycache:inc_pri3_bytes gauge
ceph_prioritycache:inc_pri3_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri4_bytes bytes allocated to pri4
# TYPE ceph_prioritycache:inc_pri4_bytes gauge
ceph_prioritycache:inc_pri4_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri5_bytes bytes allocated to pri5
# TYPE ceph_prioritycache:inc_pri5_bytes gauge
ceph_prioritycache:inc_pri5_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri6_bytes bytes allocated to pri6
# TYPE ceph_prioritycache:inc_pri6_bytes gauge
ceph_prioritycache:inc_pri6_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri7_bytes bytes allocated to pri7
# TYPE ceph_prioritycache:inc_pri7_bytes gauge
ceph_prioritycache:inc_pri7_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri8_bytes bytes allocated to pri8
# TYPE ceph_prioritycache:inc_pri8_bytes gauge
ceph_prioritycache:inc_pri8_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_pri9_bytes bytes allocated to pri9
# TYPE ceph_prioritycache:inc_pri9_bytes gauge
ceph_prioritycache:inc_pri9_bytes{ceph_daemon="mon.a"} 0.0
# HELP ceph_prioritycache:inc_reserved_bytes bytes reserved for future growth.
# TYPE ceph_prioritycache:inc_reserved_bytes gauge
ceph_prioritycache:inc_reserved_bytes{ceph_daemon="mon.a"} 70686579.0
# HELP ceph_prioritycache:kv_committed_bytes total bytes committed,
# TYPE ceph_prioritycache:kv_committed_bytes gauge
ceph_prioritycache:kv_committed_bytes{ceph_daemon="mon.a"} 868220928.0
ceph_prioritycache:kv_committed_bytes{ceph_daemon="osd.0"} 3976200192.0
ceph_prioritycache:kv_committed_bytes{ceph_daemon="osd.1"} 3976200192.0
ceph_prioritycache:kv_committed_bytes{ceph_daemon="osd.2"} 3976200192.0
# HELP ceph_prioritycache:kv_pri0_bytes bytes allocated to pri0
# TYPE ceph_prioritycache:kv_pri0_bytes gauge
ceph_prioritycache:kv_pri0_bytes{ceph_daemon="mon.a"} 92464.0
ceph_prioritycache:kv_pri0_bytes{ceph_daemon="osd.0"} 545680.0
ceph_prioritycache:kv_pri0_bytes{ceph_daemon="osd.1"} 824512.0
ceph_prioritycache:kv_pri0_bytes{ceph_daemon="osd.2"} 1000560.0
# HELP ceph_prioritycache:kv_pri10_bytes bytes allocated to pri10
# TYPE ceph_prioritycache:kv_pri10_bytes gauge
ceph_prioritycache:kv_pri10_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri10_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri10_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri10_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri11_bytes bytes allocated to pri11
# TYPE ceph_prioritycache:kv_pri11_bytes gauge
ceph_prioritycache:kv_pri11_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri11_bytes{ceph_daemon="osd.0"} 3694809112.0
ceph_prioritycache:kv_pri11_bytes{ceph_daemon="osd.1"} 3694589432.0
ceph_prioritycache:kv_pri11_bytes{ceph_daemon="osd.2"} 3694142763.0
# HELP ceph_prioritycache:kv_pri1_bytes bytes allocated to pri1
# TYPE ceph_prioritycache:kv_pri1_bytes gauge
ceph_prioritycache:kv_pri1_bytes{ceph_daemon="mon.a"} 797628269.0
ceph_prioritycache:kv_pri1_bytes{ceph_daemon="osd.0"} 207264.0
ceph_prioritycache:kv_pri1_bytes{ceph_daemon="osd.1"} 456960.0
ceph_prioritycache:kv_pri1_bytes{ceph_daemon="osd.2"} 45168.0
# HELP ceph_prioritycache:kv_pri2_bytes bytes allocated to pri2
# TYPE ceph_prioritycache:kv_pri2_bytes gauge
ceph_prioritycache:kv_pri2_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri2_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri2_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri2_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri3_bytes bytes allocated to pri3
# TYPE ceph_prioritycache:kv_pri3_bytes gauge
ceph_prioritycache:kv_pri3_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri3_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri3_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri3_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri4_bytes bytes allocated to pri4
# TYPE ceph_prioritycache:kv_pri4_bytes gauge
ceph_prioritycache:kv_pri4_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri4_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri4_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri4_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri5_bytes bytes allocated to pri5
# TYPE ceph_prioritycache:kv_pri5_bytes gauge
ceph_prioritycache:kv_pri5_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri5_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri5_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri5_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri6_bytes bytes allocated to pri6
# TYPE ceph_prioritycache:kv_pri6_bytes gauge
ceph_prioritycache:kv_pri6_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri6_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri6_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri6_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri7_bytes bytes allocated to pri7
# TYPE ceph_prioritycache:kv_pri7_bytes gauge
ceph_prioritycache:kv_pri7_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri7_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri7_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri7_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri8_bytes bytes allocated to pri8
# TYPE ceph_prioritycache:kv_pri8_bytes gauge
ceph_prioritycache:kv_pri8_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri8_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri8_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri8_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_pri9_bytes bytes allocated to pri9
# TYPE ceph_prioritycache:kv_pri9_bytes gauge
ceph_prioritycache:kv_pri9_bytes{ceph_daemon="mon.a"} 0.0
ceph_prioritycache:kv_pri9_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:kv_pri9_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:kv_pri9_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:kv_reserved_bytes bytes reserved for future growth.
# TYPE ceph_prioritycache:kv_reserved_bytes gauge
ceph_prioritycache:kv_reserved_bytes{ceph_daemon="mon.a"} 70500195.0
ceph_prioritycache:kv_reserved_bytes{ceph_daemon="osd.0"} 280638136.0
ceph_prioritycache:kv_reserved_bytes{ceph_daemon="osd.1"} 280329288.0
ceph_prioritycache:kv_reserved_bytes{ceph_daemon="osd.2"} 281011701.0
# HELP ceph_rocksdb_compact Compactions
# TYPE ceph_rocksdb_compact counter
ceph_rocksdb_compact{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_compact_queue_len Length of compaction queue
# TYPE ceph_rocksdb_compact_queue_len gauge
ceph_rocksdb_compact_queue_len{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_compact_queue_merge Mergings of ranges in compaction queue
# TYPE ceph_rocksdb_compact_queue_merge counter
ceph_rocksdb_compact_queue_merge{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_compact_range Compactions by range
# TYPE ceph_rocksdb_compact_range counter
ceph_rocksdb_compact_range{ceph_daemon="mon.a"} 21073.0
# HELP ceph_rocksdb_get Gets
# TYPE ceph_rocksdb_get counter
ceph_rocksdb_get{ceph_daemon="mon.a"} 78895001.0
# HELP ceph_rocksdb_get_latency_sum Get latency Total
# TYPE ceph_rocksdb_get_latency_sum counter
ceph_rocksdb_get_latency_sum{ceph_daemon="mon.a"} 871.45074185
# HELP ceph_rocksdb_get_latency_count Get latency Count
# TYPE ceph_rocksdb_get_latency_count counter
ceph_rocksdb_get_latency_count{ceph_daemon="mon.a"} 78895001.0
# HELP ceph_rocksdb_rocksdb_write_delay_time_sum Rocksdb write delay time Total
# TYPE ceph_rocksdb_rocksdb_write_delay_time_sum counter
ceph_rocksdb_rocksdb_write_delay_time_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_delay_time_count Rocksdb write delay time Count
# TYPE ceph_rocksdb_rocksdb_write_delay_time_count counter
ceph_rocksdb_rocksdb_write_delay_time_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_memtable_time_sum Rocksdb write memtable time Total
# TYPE ceph_rocksdb_rocksdb_write_memtable_time_sum counter
ceph_rocksdb_rocksdb_write_memtable_time_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_memtable_time_count Rocksdb write memtable time Count
# TYPE ceph_rocksdb_rocksdb_write_memtable_time_count counter
ceph_rocksdb_rocksdb_write_memtable_time_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_pre_and_post_time_sum total time spent on writing a record, excluding write process Total
# TYPE ceph_rocksdb_rocksdb_write_pre_and_post_time_sum counter
ceph_rocksdb_rocksdb_write_pre_and_post_time_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_pre_and_post_time_count total time spent on writing a record, excluding write process Count
# TYPE ceph_rocksdb_rocksdb_write_pre_and_post_time_count counter
ceph_rocksdb_rocksdb_write_pre_and_post_time_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_wal_time_sum Rocksdb write wal time Total
# TYPE ceph_rocksdb_rocksdb_write_wal_time_sum counter
ceph_rocksdb_rocksdb_write_wal_time_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_rocksdb_write_wal_time_count Rocksdb write wal time Count
# TYPE ceph_rocksdb_rocksdb_write_wal_time_count counter
ceph_rocksdb_rocksdb_write_wal_time_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_submit_latency_sum Submit Latency Total
# TYPE ceph_rocksdb_submit_latency_sum counter
ceph_rocksdb_submit_latency_sum{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_submit_latency_count Submit Latency Count
# TYPE ceph_rocksdb_submit_latency_count counter
ceph_rocksdb_submit_latency_count{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_submit_sync_latency_sum Submit Sync Latency Total
# TYPE ceph_rocksdb_submit_sync_latency_sum counter
ceph_rocksdb_submit_sync_latency_sum{ceph_daemon="mon.a"} 19167.646233876
# HELP ceph_rocksdb_submit_sync_latency_count Submit Sync Latency Count
# TYPE ceph_rocksdb_submit_sync_latency_count counter
ceph_rocksdb_submit_sync_latency_count{ceph_daemon="mon.a"} 3956457.0
# HELP ceph_rocksdb_submit_transaction Submit transactions
# TYPE ceph_rocksdb_submit_transaction counter
ceph_rocksdb_submit_transaction{ceph_daemon="mon.a"} 0.0
# HELP ceph_rocksdb_submit_transaction_sync Submit transactions sync
# TYPE ceph_rocksdb_submit_transaction_sync counter
ceph_rocksdb_submit_transaction_sync{ceph_daemon="mon.a"} 3956457.0
# HELP ceph_bluefs_bytes_written_slow Bytes written to WAL/SSTs at slow device
# TYPE ceph_bluefs_bytes_written_slow counter
ceph_bluefs_bytes_written_slow{ceph_daemon="osd.0"} 0.0
ceph_bluefs_bytes_written_slow{ceph_daemon="osd.1"} 0.0
ceph_bluefs_bytes_written_slow{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluefs_bytes_written_sst Bytes written to SSTs
# TYPE ceph_bluefs_bytes_written_sst counter
ceph_bluefs_bytes_written_sst{ceph_daemon="osd.0"} 327680.0
ceph_bluefs_bytes_written_sst{ceph_daemon="osd.1"} 602112.0
ceph_bluefs_bytes_written_sst{ceph_daemon="osd.2"} 10571776.0
# HELP ceph_bluefs_bytes_written_wal Bytes written to WAL
# TYPE ceph_bluefs_bytes_written_wal counter
ceph_bluefs_bytes_written_wal{ceph_daemon="osd.0"} 833417216.0
ceph_bluefs_bytes_written_wal{ceph_daemon="osd.1"} 935251968.0
ceph_bluefs_bytes_written_wal{ceph_daemon="osd.2"} 1564045312.0
# HELP ceph_bluefs_db_total_bytes Total bytes (main db device)
# TYPE ceph_bluefs_db_total_bytes gauge
ceph_bluefs_db_total_bytes{ceph_daemon="osd.0"} 1288372224.0
ceph_bluefs_db_total_bytes{ceph_daemon="osd.1"} 1288372224.0
ceph_bluefs_db_total_bytes{ceph_daemon="osd.2"} 1288372224.0
# HELP ceph_bluefs_db_used_bytes Used bytes (main db device)
# TYPE ceph_bluefs_db_used_bytes gauge
ceph_bluefs_db_used_bytes{ceph_daemon="osd.0"} 178192384.0
ceph_bluefs_db_used_bytes{ceph_daemon="osd.1"} 204931072.0
ceph_bluefs_db_used_bytes{ceph_daemon="osd.2"} 338100224.0
# HELP ceph_bluefs_log_bytes Size of the metadata log
# TYPE ceph_bluefs_log_bytes gauge
ceph_bluefs_log_bytes{ceph_daemon="osd.0"} 13950976.0
ceph_bluefs_log_bytes{ceph_daemon="osd.1"} 17354752.0
ceph_bluefs_log_bytes{ceph_daemon="osd.2"} 18907136.0
# HELP ceph_bluefs_logged_bytes Bytes written to the metadata log
# TYPE ceph_bluefs_logged_bytes counter
ceph_bluefs_logged_bytes{ceph_daemon="osd.0"} 9576448.0
ceph_bluefs_logged_bytes{ceph_daemon="osd.1"} 10665984.0
ceph_bluefs_logged_bytes{ceph_daemon="osd.2"} 18841600.0
# HELP ceph_bluefs_num_files File count
# TYPE ceph_bluefs_num_files gauge
ceph_bluefs_num_files{ceph_daemon="osd.0"} 11.0
ceph_bluefs_num_files{ceph_daemon="osd.1"} 12.0
ceph_bluefs_num_files{ceph_daemon="osd.2"} 12.0
# HELP ceph_bluefs_read_bytes Bytes requested in buffered read mode
# TYPE ceph_bluefs_read_bytes counter
ceph_bluefs_read_bytes{ceph_daemon="osd.0"} 13347719.0
ceph_bluefs_read_bytes{ceph_daemon="osd.1"} 18255178.0
ceph_bluefs_read_bytes{ceph_daemon="osd.2"} 1884677.0
# HELP ceph_bluefs_read_prefetch_bytes Bytes requested in prefetch read mode
# TYPE ceph_bluefs_read_prefetch_bytes counter
ceph_bluefs_read_prefetch_bytes{ceph_daemon="osd.0"} 1164974.0
ceph_bluefs_read_prefetch_bytes{ceph_daemon="osd.1"} 1660937.0
ceph_bluefs_read_prefetch_bytes{ceph_daemon="osd.2"} 1805326.0
# HELP ceph_bluefs_read_random_buffer_bytes Bytes read from prefetch buffer in random read mode
# TYPE ceph_bluefs_read_random_buffer_bytes counter
ceph_bluefs_read_random_buffer_bytes{ceph_daemon="osd.0"} 372938.0
ceph_bluefs_read_random_buffer_bytes{ceph_daemon="osd.1"} 742263.0
ceph_bluefs_read_random_buffer_bytes{ceph_daemon="osd.2"} 19574.0
# HELP ceph_bluefs_read_random_bytes Bytes requested in random read mode
# TYPE ceph_bluefs_read_random_bytes counter
ceph_bluefs_read_random_bytes{ceph_daemon="osd.0"} 708485.0
ceph_bluefs_read_random_bytes{ceph_daemon="osd.1"} 1221237.0
ceph_bluefs_read_random_bytes{ceph_daemon="osd.2"} 1032806.0
# HELP ceph_bluefs_read_random_disk_bytes Bytes read from disk in random read mode
# TYPE ceph_bluefs_read_random_disk_bytes counter
ceph_bluefs_read_random_disk_bytes{ceph_daemon="osd.0"} 335547.0
ceph_bluefs_read_random_disk_bytes{ceph_daemon="osd.1"} 478974.0
ceph_bluefs_read_random_disk_bytes{ceph_daemon="osd.2"} 1013232.0
# HELP ceph_bluefs_slow_total_bytes Total bytes (slow device)
# TYPE ceph_bluefs_slow_total_bytes gauge
ceph_bluefs_slow_total_bytes{ceph_daemon="osd.0"} 0.0
ceph_bluefs_slow_total_bytes{ceph_daemon="osd.1"} 0.0
ceph_bluefs_slow_total_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluefs_slow_used_bytes Used bytes (slow device)
# TYPE ceph_bluefs_slow_used_bytes gauge
ceph_bluefs_slow_used_bytes{ceph_daemon="osd.0"} 0.0
ceph_bluefs_slow_used_bytes{ceph_daemon="osd.1"} 0.0
ceph_bluefs_slow_used_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluefs_wal_total_bytes Total bytes (wal device)
# TYPE ceph_bluefs_wal_total_bytes gauge
ceph_bluefs_wal_total_bytes{ceph_daemon="osd.0"} 0.0
ceph_bluefs_wal_total_bytes{ceph_daemon="osd.1"} 0.0
ceph_bluefs_wal_total_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluefs_wal_used_bytes Used bytes (wal device)
# TYPE ceph_bluefs_wal_used_bytes gauge
ceph_bluefs_wal_used_bytes{ceph_daemon="osd.0"} 0.0
ceph_bluefs_wal_used_bytes{ceph_daemon="osd.1"} 0.0
ceph_bluefs_wal_used_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluestore_bluestore_compressed Sum for stored compressed bytes
# TYPE ceph_bluestore_bluestore_compressed gauge
ceph_bluestore_bluestore_compressed{ceph_daemon="osd.0"} 0.0
ceph_bluestore_bluestore_compressed{ceph_daemon="osd.1"} 0.0
ceph_bluestore_bluestore_compressed{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluestore_bluestore_compressed_allocated Sum for bytes allocated for compressed data
# TYPE ceph_bluestore_bluestore_compressed_allocated gauge
ceph_bluestore_bluestore_compressed_allocated{ceph_daemon="osd.0"} 0.0
ceph_bluestore_bluestore_compressed_allocated{ceph_daemon="osd.1"} 0.0
ceph_bluestore_bluestore_compressed_allocated{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluestore_bluestore_compressed_original Sum for original bytes that were compressed
# TYPE ceph_bluestore_bluestore_compressed_original gauge
ceph_bluestore_bluestore_compressed_original{ceph_daemon="osd.0"} 0.0
ceph_bluestore_bluestore_compressed_original{ceph_daemon="osd.1"} 0.0
ceph_bluestore_bluestore_compressed_original{ceph_daemon="osd.2"} 0.0
# HELP ceph_bluestore_commit_lat_sum Average commit latency Total
# TYPE ceph_bluestore_commit_lat_sum counter
ceph_bluestore_commit_lat_sum{ceph_daemon="osd.0"} 340.295878507
ceph_bluestore_commit_lat_sum{ceph_daemon="osd.1"} 366.131703857
ceph_bluestore_commit_lat_sum{ceph_daemon="osd.2"} 726.98301177
# HELP ceph_bluestore_commit_lat_count Average commit latency Count
# TYPE ceph_bluestore_commit_lat_count counter
ceph_bluestore_commit_lat_count{ceph_daemon="osd.0"} 145523.0
ceph_bluestore_commit_lat_count{ceph_daemon="osd.1"} 161431.0
ceph_bluestore_commit_lat_count{ceph_daemon="osd.2"} 264557.0
# HELP ceph_bluestore_kv_final_lat_sum Average kv_finalize thread latency Total
# TYPE ceph_bluestore_kv_final_lat_sum counter
ceph_bluestore_kv_final_lat_sum{ceph_daemon="osd.0"} 5.231625237
ceph_bluestore_kv_final_lat_sum{ceph_daemon="osd.1"} 5.901987043
ceph_bluestore_kv_final_lat_sum{ceph_daemon="osd.2"} 8.829349573
# HELP ceph_bluestore_kv_final_lat_count Average kv_finalize thread latency Count
# TYPE ceph_bluestore_kv_final_lat_count counter
ceph_bluestore_kv_final_lat_count{ceph_daemon="osd.0"} 131520.0
ceph_bluestore_kv_final_lat_count{ceph_daemon="osd.1"} 148078.0
ceph_bluestore_kv_final_lat_count{ceph_daemon="osd.2"} 239887.0
# HELP ceph_bluestore_kv_flush_lat_sum Average kv_thread flush latency Total
# TYPE ceph_bluestore_kv_flush_lat_sum counter
ceph_bluestore_kv_flush_lat_sum{ceph_daemon="osd.0"} 0.048292212
ceph_bluestore_kv_flush_lat_sum{ceph_daemon="osd.1"} 0.054372589
ceph_bluestore_kv_flush_lat_sum{ceph_daemon="osd.2"} 0.080748781
# HELP ceph_bluestore_kv_flush_lat_count Average kv_thread flush latency Count
# TYPE ceph_bluestore_kv_flush_lat_count counter
ceph_bluestore_kv_flush_lat_count{ceph_daemon="osd.0"} 131535.0
ceph_bluestore_kv_flush_lat_count{ceph_daemon="osd.1"} 148105.0
ceph_bluestore_kv_flush_lat_count{ceph_daemon="osd.2"} 239982.0
# HELP ceph_bluestore_kv_sync_lat_sum Average kv_sync thread latency Total
# TYPE ceph_bluestore_kv_sync_lat_sum counter
ceph_bluestore_kv_sync_lat_sum{ceph_daemon="osd.0"} 213.725926457
ceph_bluestore_kv_sync_lat_sum{ceph_daemon="osd.1"} 240.653859567
ceph_bluestore_kv_sync_lat_sum{ceph_daemon="osd.2"} 448.951882951
# HELP ceph_bluestore_kv_sync_lat_count Average kv_sync thread latency Count
# TYPE ceph_bluestore_kv_sync_lat_count counter
ceph_bluestore_kv_sync_lat_count{ceph_daemon="osd.0"} 131535.0
ceph_bluestore_kv_sync_lat_count{ceph_daemon="osd.1"} 148105.0
ceph_bluestore_kv_sync_lat_count{ceph_daemon="osd.2"} 239982.0
# HELP ceph_bluestore_read_lat_sum Average read latency Total
# TYPE ceph_bluestore_read_lat_sum counter
ceph_bluestore_read_lat_sum{ceph_daemon="osd.0"} 0.117666618
ceph_bluestore_read_lat_sum{ceph_daemon="osd.1"} 0.171958688
ceph_bluestore_read_lat_sum{ceph_daemon="osd.2"} 0.432892661
# HELP ceph_bluestore_read_lat_count Average read latency Count
# TYPE ceph_bluestore_read_lat_count counter
ceph_bluestore_read_lat_count{ceph_daemon="osd.0"} 3220.0
ceph_bluestore_read_lat_count{ceph_daemon="osd.1"} 2837.0
ceph_bluestore_read_lat_count{ceph_daemon="osd.2"} 9369.0
# HELP ceph_bluestore_state_aio_wait_lat_sum Average aio_wait state latency Total
# TYPE ceph_bluestore_state_aio_wait_lat_sum counter
ceph_bluestore_state_aio_wait_lat_sum{ceph_daemon="osd.0"} 0.033118935
ceph_bluestore_state_aio_wait_lat_sum{ceph_daemon="osd.1"} 0.041101682
ceph_bluestore_state_aio_wait_lat_sum{ceph_daemon="osd.2"} 0.060712056
# HELP ceph_bluestore_state_aio_wait_lat_count Average aio_wait state latency Count
# TYPE ceph_bluestore_state_aio_wait_lat_count counter
ceph_bluestore_state_aio_wait_lat_count{ceph_daemon="osd.0"} 145523.0
ceph_bluestore_state_aio_wait_lat_count{ceph_daemon="osd.1"} 161431.0
ceph_bluestore_state_aio_wait_lat_count{ceph_daemon="osd.2"} 264557.0
# HELP ceph_bluestore_submit_lat_sum Average submit latency Total
# TYPE ceph_bluestore_submit_lat_sum counter
ceph_bluestore_submit_lat_sum{ceph_daemon="osd.0"} 11.06778419
ceph_bluestore_submit_lat_sum{ceph_daemon="osd.1"} 12.465797963
ceph_bluestore_submit_lat_sum{ceph_daemon="osd.2"} 19.909814491
# HELP ceph_bluestore_submit_lat_count Average submit latency Count
# TYPE ceph_bluestore_submit_lat_count counter
ceph_bluestore_submit_lat_count{ceph_daemon="osd.0"} 145523.0
ceph_bluestore_submit_lat_count{ceph_daemon="osd.1"} 161431.0
ceph_bluestore_submit_lat_count{ceph_daemon="osd.2"} 264557.0
# HELP ceph_bluestore_throttle_lat_sum Average submit throttle latency Total
# TYPE ceph_bluestore_throttle_lat_sum counter
ceph_bluestore_throttle_lat_sum{ceph_daemon="osd.0"} 1.765766974
ceph_bluestore_throttle_lat_sum{ceph_daemon="osd.1"} 1.921322465
ceph_bluestore_throttle_lat_sum{ceph_daemon="osd.2"} 3.187246191
# HELP ceph_bluestore_throttle_lat_count Average submit throttle latency Count
# TYPE ceph_bluestore_throttle_lat_count counter
ceph_bluestore_throttle_lat_count{ceph_daemon="osd.0"} 145523.0
ceph_bluestore_throttle_lat_count{ceph_daemon="osd.1"} 161431.0
ceph_bluestore_throttle_lat_count{ceph_daemon="osd.2"} 264557.0
# HELP ceph_objecter_op_active Operations active
# TYPE ceph_objecter_op_active gauge
ceph_objecter_op_active{ceph_daemon="osd.0"} 0.0
ceph_objecter_op_active{ceph_daemon="osd.1"} 0.0
ceph_objecter_op_active{ceph_daemon="osd.2"} 0.0
ceph_objecter_op_active{ceph_daemon="rgw.my.store.a"} 0.0
ceph_objecter_op_active{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_op_r Read operations
# TYPE ceph_objecter_op_r counter
ceph_objecter_op_r{ceph_daemon="osd.0"} 0.0
ceph_objecter_op_r{ceph_daemon="osd.1"} 0.0
ceph_objecter_op_r{ceph_daemon="osd.2"} 0.0
ceph_objecter_op_r{ceph_daemon="rgw.my.store.a"} 61039.0
ceph_objecter_op_r{ceph_daemon="rgw.my.store.b"} 58221.0
# HELP ceph_objecter_op_rmw Read-modify-write operations
# TYPE ceph_objecter_op_rmw counter
ceph_objecter_op_rmw{ceph_daemon="osd.0"} 0.0
ceph_objecter_op_rmw{ceph_daemon="osd.1"} 0.0
ceph_objecter_op_rmw{ceph_daemon="osd.2"} 0.0
ceph_objecter_op_rmw{ceph_daemon="rgw.my.store.a"} 0.0
ceph_objecter_op_rmw{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_op_w Write operations
# TYPE ceph_objecter_op_w counter
ceph_objecter_op_w{ceph_daemon="osd.0"} 0.0
ceph_objecter_op_w{ceph_daemon="osd.1"} 0.0
ceph_objecter_op_w{ceph_daemon="osd.2"} 0.0
ceph_objecter_op_w{ceph_daemon="rgw.my.store.a"} 520188.0
ceph_objecter_op_w{ceph_daemon="rgw.my.store.b"} 490992.0
# HELP ceph_osd_numpg Placement groups
# TYPE ceph_osd_numpg gauge
ceph_osd_numpg{ceph_daemon="osd.0"} 27.0
ceph_osd_numpg{ceph_daemon="osd.1"} 24.0
ceph_osd_numpg{ceph_daemon="osd.2"} 30.0
# HELP ceph_osd_numpg_removing Placement groups queued for local deletion
# TYPE ceph_osd_numpg_removing gauge
ceph_osd_numpg_removing{ceph_daemon="osd.0"} 0.0
ceph_osd_numpg_removing{ceph_daemon="osd.1"} 0.0
ceph_osd_numpg_removing{ceph_daemon="osd.2"} 0.0
# HELP ceph_osd_op Client operations
# TYPE ceph_osd_op counter
ceph_osd_op{ceph_daemon="osd.0"} 766547.0
ceph_osd_op{ceph_daemon="osd.1"} 792255.0
ceph_osd_op{ceph_daemon="osd.2"} 1528522.0
# HELP ceph_osd_op_in_bytes Client operations total write size
# TYPE ceph_osd_op_in_bytes counter
ceph_osd_op_in_bytes{ceph_daemon="osd.0"} 5099.0
ceph_osd_op_in_bytes{ceph_daemon="osd.1"} 5887.0
ceph_osd_op_in_bytes{ceph_daemon="osd.2"} 9192.0
# HELP ceph_osd_op_latency_sum Latency of client operations (including queue time) Total
# TYPE ceph_osd_op_latency_sum counter
ceph_osd_op_latency_sum{ceph_daemon="osd.0"} 484.939722937
ceph_osd_op_latency_sum{ceph_daemon="osd.1"} 547.563473499
ceph_osd_op_latency_sum{ceph_daemon="osd.2"} 977.987879996
# HELP ceph_osd_op_latency_count Latency of client operations (including queue time) Count
# TYPE ceph_osd_op_latency_count counter
ceph_osd_op_latency_count{ceph_daemon="osd.0"} 766547.0
ceph_osd_op_latency_count{ceph_daemon="osd.1"} 792255.0
ceph_osd_op_latency_count{ceph_daemon="osd.2"} 1528522.0
# HELP ceph_osd_op_out_bytes Client operations total read size
# TYPE ceph_osd_op_out_bytes counter
ceph_osd_op_out_bytes{ceph_daemon="osd.0"} 979844.0
ceph_osd_op_out_bytes{ceph_daemon="osd.1"} 1149710.0
ceph_osd_op_out_bytes{ceph_daemon="osd.2"} 2703949.0
# HELP ceph_osd_op_prepare_latency_sum Latency of client operations (excluding queue time and wait for finished) Total
# TYPE ceph_osd_op_prepare_latency_sum counter
ceph_osd_op_prepare_latency_sum{ceph_daemon="osd.0"} 132.250278457
ceph_osd_op_prepare_latency_sum{ceph_daemon="osd.1"} 118.469018929
ceph_osd_op_prepare_latency_sum{ceph_daemon="osd.2"} 261.922833493
# HELP ceph_osd_op_prepare_latency_count Latency of client operations (excluding queue time and wait for finished) Count
# TYPE ceph_osd_op_prepare_latency_count counter
ceph_osd_op_prepare_latency_count{ceph_daemon="osd.0"} 781182.0
ceph_osd_op_prepare_latency_count{ceph_daemon="osd.1"} 806917.0
ceph_osd_op_prepare_latency_count{ceph_daemon="osd.2"} 1560415.0
# HELP ceph_osd_op_process_latency_sum Latency of client operations (excluding queue time) Total
# TYPE ceph_osd_op_process_latency_sum counter
ceph_osd_op_process_latency_sum{ceph_daemon="osd.0"} 313.807617274
ceph_osd_op_process_latency_sum{ceph_daemon="osd.1"} 339.246691439
ceph_osd_op_process_latency_sum{ceph_daemon="osd.2"} 621.735196912
# HELP ceph_osd_op_process_latency_count Latency of client operations (excluding queue time) Count
# TYPE ceph_osd_op_process_latency_count counter
ceph_osd_op_process_latency_count{ceph_daemon="osd.0"} 766547.0
ceph_osd_op_process_latency_count{ceph_daemon="osd.1"} 792255.0
ceph_osd_op_process_latency_count{ceph_daemon="osd.2"} 1528522.0
# HELP ceph_osd_op_r Client read operations
# TYPE ceph_osd_op_r counter
ceph_osd_op_r{ceph_daemon="osd.0"} 636925.0
ceph_osd_op_r{ceph_daemon="osd.1"} 646607.0
ceph_osd_op_r{ceph_daemon="osd.2"} 1301033.0
# HELP ceph_osd_op_r_latency_sum Latency of read operation (including queue time) Total
# TYPE ceph_osd_op_r_latency_sum counter
ceph_osd_op_r_latency_sum{ceph_daemon="osd.0"} 213.305369671
ceph_osd_op_r_latency_sum{ceph_daemon="osd.1"} 242.114068587
ceph_osd_op_r_latency_sum{ceph_daemon="osd.2"} 447.432472053
# HELP ceph_osd_op_r_latency_count Latency of read operation (including queue time) Count
# TYPE ceph_osd_op_r_latency_count counter
ceph_osd_op_r_latency_count{ceph_daemon="osd.0"} 636925.0
ceph_osd_op_r_latency_count{ceph_daemon="osd.1"} 646607.0
ceph_osd_op_r_latency_count{ceph_daemon="osd.2"} 1301033.0
# HELP ceph_osd_op_r_out_bytes Client data read
# TYPE ceph_osd_op_r_out_bytes counter
ceph_osd_op_r_out_bytes{ceph_daemon="osd.0"} 979844.0
ceph_osd_op_r_out_bytes{ceph_daemon="osd.1"} 1149710.0
ceph_osd_op_r_out_bytes{ceph_daemon="osd.2"} 2703949.0
# HELP ceph_osd_op_r_prepare_latency_sum Latency of read operations (excluding queue time and wait for finished) Total
# TYPE ceph_osd_op_r_prepare_latency_sum counter
ceph_osd_op_r_prepare_latency_sum{ceph_daemon="osd.0"} 99.670571638
ceph_osd_op_r_prepare_latency_sum{ceph_daemon="osd.1"} 81.85564911
ceph_osd_op_r_prepare_latency_sum{ceph_daemon="osd.2"} 206.81346294
# HELP ceph_osd_op_r_prepare_latency_count Latency of read operations (excluding queue time and wait for finished) Count
# TYPE ceph_osd_op_r_prepare_latency_count counter
ceph_osd_op_r_prepare_latency_count{ceph_daemon="osd.0"} 636933.0
ceph_osd_op_r_prepare_latency_count{ceph_daemon="osd.1"} 646610.0
ceph_osd_op_r_prepare_latency_count{ceph_daemon="osd.2"} 1301041.0
# HELP ceph_osd_op_r_process_latency_sum Latency of read operation (excluding queue time) Total
# TYPE ceph_osd_op_r_process_latency_sum counter
ceph_osd_op_r_process_latency_sum{ceph_daemon="osd.0"} 55.391489815
ceph_osd_op_r_process_latency_sum{ceph_daemon="osd.1"} 48.74943897
ceph_osd_op_r_process_latency_sum{ceph_daemon="osd.2"} 115.282851336
# HELP ceph_osd_op_r_process_latency_count Latency of read operation (excluding queue time) Count
# TYPE ceph_osd_op_r_process_latency_count counter
ceph_osd_op_r_process_latency_count{ceph_daemon="osd.0"} 636925.0
ceph_osd_op_r_process_latency_count{ceph_daemon="osd.1"} 646607.0
ceph_osd_op_r_process_latency_count{ceph_daemon="osd.2"} 1301033.0
# HELP ceph_osd_op_rw Client read-modify-write operations
# TYPE ceph_osd_op_rw counter
ceph_osd_op_rw{ceph_daemon="osd.0"} 129551.0
ceph_osd_op_rw{ceph_daemon="osd.1"} 145587.0
ceph_osd_op_rw{ceph_daemon="osd.2"} 226335.0
# HELP ceph_osd_op_rw_in_bytes Client read-modify-write operations write in
# TYPE ceph_osd_op_rw_in_bytes counter
ceph_osd_op_rw_in_bytes{ceph_daemon="osd.0"} 2591.0
ceph_osd_op_rw_in_bytes{ceph_daemon="osd.1"} 4728.0
ceph_osd_op_rw_in_bytes{ceph_daemon="osd.2"} 5524.0
# HELP ceph_osd_op_rw_latency_sum Latency of read-modify-write operation (including queue time) Total
# TYPE ceph_osd_op_rw_latency_sum counter
ceph_osd_op_rw_latency_sum{ceph_daemon="osd.0"} 271.482053273
ceph_osd_op_rw_latency_sum{ceph_daemon="osd.1"} 305.316303756
ceph_osd_op_rw_latency_sum{ceph_daemon="osd.2"} 527.409151858
# HELP ceph_osd_op_rw_latency_count Latency of read-modify-write operation (including queue time) Count
# TYPE ceph_osd_op_rw_latency_count counter
ceph_osd_op_rw_latency_count{ceph_daemon="osd.0"} 129551.0
ceph_osd_op_rw_latency_count{ceph_daemon="osd.1"} 145587.0
ceph_osd_op_rw_latency_count{ceph_daemon="osd.2"} 226335.0
# HELP ceph_osd_op_rw_out_bytes Client read-modify-write operations read out
# TYPE ceph_osd_op_rw_out_bytes counter
ceph_osd_op_rw_out_bytes{ceph_daemon="osd.0"} 0.0
ceph_osd_op_rw_out_bytes{ceph_daemon="osd.1"} 0.0
ceph_osd_op_rw_out_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_osd_op_rw_prepare_latency_sum Latency of read-modify-write operations (excluding queue time and wait for finished) Total
# TYPE ceph_osd_op_rw_prepare_latency_sum counter
ceph_osd_op_rw_prepare_latency_sum{ceph_daemon="osd.0"} 32.5700223
ceph_osd_op_rw_prepare_latency_sum{ceph_daemon="osd.1"} 36.607380939
ceph_osd_op_rw_prepare_latency_sum{ceph_daemon="osd.2"} 54.969257178
# HELP ceph_osd_op_rw_prepare_latency_count Latency of read-modify-write operations (excluding queue time and wait for finished) Count
# TYPE ceph_osd_op_rw_prepare_latency_count counter
ceph_osd_op_rw_prepare_latency_count{ceph_daemon="osd.0"} 144177.0
ceph_osd_op_rw_prepare_latency_count{ceph_daemon="osd.1"} 160246.0
ceph_osd_op_rw_prepare_latency_count{ceph_daemon="osd.2"} 258220.0
# HELP ceph_osd_op_rw_process_latency_sum Latency of read-modify-write operation (excluding queue time) Total
# TYPE ceph_osd_op_rw_process_latency_sum counter
ceph_osd_op_rw_process_latency_sum{ceph_daemon="osd.0"} 258.272175103
ceph_osd_op_rw_process_latency_sum{ceph_daemon="osd.1"} 290.370468303
ceph_osd_op_rw_process_latency_sum{ceph_daemon="osd.2"} 503.425710401
# HELP ceph_osd_op_rw_process_latency_count Latency of read-modify-write operation (excluding queue time) Count
# TYPE ceph_osd_op_rw_process_latency_count counter
ceph_osd_op_rw_process_latency_count{ceph_daemon="osd.0"} 129551.0
ceph_osd_op_rw_process_latency_count{ceph_daemon="osd.1"} 145587.0
ceph_osd_op_rw_process_latency_count{ceph_daemon="osd.2"} 226335.0
# HELP ceph_osd_op_w Client write operations
# TYPE ceph_osd_op_w counter
ceph_osd_op_w{ceph_daemon="osd.0"} 71.0
ceph_osd_op_w{ceph_daemon="osd.1"} 61.0
ceph_osd_op_w{ceph_daemon="osd.2"} 1154.0
# HELP ceph_osd_op_w_in_bytes Client data written
# TYPE ceph_osd_op_w_in_bytes counter
ceph_osd_op_w_in_bytes{ceph_daemon="osd.0"} 2508.0
ceph_osd_op_w_in_bytes{ceph_daemon="osd.1"} 1159.0
ceph_osd_op_w_in_bytes{ceph_daemon="osd.2"} 3668.0
# HELP ceph_osd_op_w_latency_sum Latency of write operation (including queue time) Total
# TYPE ceph_osd_op_w_latency_sum counter
ceph_osd_op_w_latency_sum{ceph_daemon="osd.0"} 0.152299993
ceph_osd_op_w_latency_sum{ceph_daemon="osd.1"} 0.133101156
ceph_osd_op_w_latency_sum{ceph_daemon="osd.2"} 3.146256085
# HELP ceph_osd_op_w_latency_count Latency of write operation (including queue time) Count
# TYPE ceph_osd_op_w_latency_count counter
ceph_osd_op_w_latency_count{ceph_daemon="osd.0"} 71.0
ceph_osd_op_w_latency_count{ceph_daemon="osd.1"} 61.0
ceph_osd_op_w_latency_count{ceph_daemon="osd.2"} 1154.0
# HELP ceph_osd_op_w_prepare_latency_sum Latency of write operations (excluding queue time and wait for finished) Total
# TYPE ceph_osd_op_w_prepare_latency_sum counter
ceph_osd_op_w_prepare_latency_sum{ceph_daemon="osd.0"} 0.009684519
ceph_osd_op_w_prepare_latency_sum{ceph_daemon="osd.1"} 0.00598888
ceph_osd_op_w_prepare_latency_sum{ceph_daemon="osd.2"} 0.140113375
# HELP ceph_osd_op_w_prepare_latency_count Latency of write operations (excluding queue time and wait for finished) Count
# TYPE ceph_osd_op_w_prepare_latency_count counter
ceph_osd_op_w_prepare_latency_count{ceph_daemon="osd.0"} 72.0
ceph_osd_op_w_prepare_latency_count{ceph_daemon="osd.1"} 61.0
ceph_osd_op_w_prepare_latency_count{ceph_daemon="osd.2"} 1154.0
# HELP ceph_osd_op_w_process_latency_sum Latency of write operation (excluding queue time) Total
# TYPE ceph_osd_op_w_process_latency_sum counter
ceph_osd_op_w_process_latency_sum{ceph_daemon="osd.0"} 0.143952356
ceph_osd_op_w_process_latency_sum{ceph_daemon="osd.1"} 0.126784166
ceph_osd_op_w_process_latency_sum{ceph_daemon="osd.2"} 3.026635175
# HELP ceph_osd_op_w_process_latency_count Latency of write operation (excluding queue time) Count
# TYPE ceph_osd_op_w_process_latency_count counter
ceph_osd_op_w_process_latency_count{ceph_daemon="osd.0"} 71.0
ceph_osd_op_w_process_latency_count{ceph_daemon="osd.1"} 61.0
ceph_osd_op_w_process_latency_count{ceph_daemon="osd.2"} 1154.0
# HELP ceph_osd_op_wip Replication operations currently being processed (primary)
# TYPE ceph_osd_op_wip gauge
ceph_osd_op_wip{ceph_daemon="osd.0"} 0.0
ceph_osd_op_wip{ceph_daemon="osd.1"} 0.0
ceph_osd_op_wip{ceph_daemon="osd.2"} 0.0
# HELP ceph_osd_recovery_bytes recovery bytes
# TYPE ceph_osd_recovery_bytes counter
ceph_osd_recovery_bytes{ceph_daemon="osd.0"} 0.0
ceph_osd_recovery_bytes{ceph_daemon="osd.1"} 0.0
ceph_osd_recovery_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_osd_recovery_ops Started recovery operations
# TYPE ceph_osd_recovery_ops counter
ceph_osd_recovery_ops{ceph_daemon="osd.0"} 0.0
ceph_osd_recovery_ops{ceph_daemon="osd.1"} 0.0
ceph_osd_recovery_ops{ceph_daemon="osd.2"} 0.0
# HELP ceph_osd_stat_bytes OSD size
# TYPE ceph_osd_stat_bytes gauge
ceph_osd_stat_bytes{ceph_daemon="osd.0"} 32208060416.0
ceph_osd_stat_bytes{ceph_daemon="osd.1"} 32208060416.0
ceph_osd_stat_bytes{ceph_daemon="osd.2"} 32208060416.0
# HELP ceph_osd_stat_bytes_used Used space
# TYPE ceph_osd_stat_bytes_used gauge
ceph_osd_stat_bytes_used{ceph_daemon="osd.0"} 1168637952.0
ceph_osd_stat_bytes_used{ceph_daemon="osd.1"} 1168113664.0
ceph_osd_stat_bytes_used{ceph_daemon="osd.2"} 1167982592.0
# HELP ceph_prioritycache:data_committed_bytes total bytes committed,
# TYPE ceph_prioritycache:data_committed_bytes gauge
ceph_prioritycache:data_committed_bytes{ceph_daemon="osd.0"} 2130706432.0
ceph_prioritycache:data_committed_bytes{ceph_daemon="osd.1"} 2130706432.0
ceph_prioritycache:data_committed_bytes{ceph_daemon="osd.2"} 2130706432.0
# HELP ceph_prioritycache:data_pri0_bytes bytes allocated to pri0
# TYPE ceph_prioritycache:data_pri0_bytes gauge
ceph_prioritycache:data_pri0_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri0_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri0_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri10_bytes bytes allocated to pri10
# TYPE ceph_prioritycache:data_pri10_bytes gauge
ceph_prioritycache:data_pri10_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri10_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri10_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri11_bytes bytes allocated to pri11
# TYPE ceph_prioritycache:data_pri11_bytes gauge
ceph_prioritycache:data_pri11_bytes{ceph_daemon="osd.0"} 1847404555.0
ceph_prioritycache:data_pri11_bytes{ceph_daemon="osd.1"} 1847294716.0
ceph_prioritycache:data_pri11_bytes{ceph_daemon="osd.2"} 1847071381.0
# HELP ceph_prioritycache:data_pri1_bytes bytes allocated to pri1
# TYPE ceph_prioritycache:data_pri1_bytes gauge
ceph_prioritycache:data_pri1_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri1_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri1_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri2_bytes bytes allocated to pri2
# TYPE ceph_prioritycache:data_pri2_bytes gauge
ceph_prioritycache:data_pri2_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri2_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri2_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri3_bytes bytes allocated to pri3
# TYPE ceph_prioritycache:data_pri3_bytes gauge
ceph_prioritycache:data_pri3_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri3_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri3_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri4_bytes bytes allocated to pri4
# TYPE ceph_prioritycache:data_pri4_bytes gauge
ceph_prioritycache:data_pri4_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri4_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri4_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri5_bytes bytes allocated to pri5
# TYPE ceph_prioritycache:data_pri5_bytes gauge
ceph_prioritycache:data_pri5_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri5_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri5_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri6_bytes bytes allocated to pri6
# TYPE ceph_prioritycache:data_pri6_bytes gauge
ceph_prioritycache:data_pri6_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri6_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri6_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri7_bytes bytes allocated to pri7
# TYPE ceph_prioritycache:data_pri7_bytes gauge
ceph_prioritycache:data_pri7_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri7_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri7_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri8_bytes bytes allocated to pri8
# TYPE ceph_prioritycache:data_pri8_bytes gauge
ceph_prioritycache:data_pri8_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri8_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri8_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_pri9_bytes bytes allocated to pri9
# TYPE ceph_prioritycache:data_pri9_bytes gauge
ceph_prioritycache:data_pri9_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:data_pri9_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:data_pri9_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:data_reserved_bytes bytes reserved for future growth.
# TYPE ceph_prioritycache:data_reserved_bytes gauge
ceph_prioritycache:data_reserved_bytes{ceph_daemon="osd.0"} 283301877.0
ceph_prioritycache:data_reserved_bytes{ceph_daemon="osd.1"} 283411716.0
ceph_prioritycache:data_reserved_bytes{ceph_daemon="osd.2"} 283635051.0
# HELP ceph_prioritycache:meta_committed_bytes total bytes committed,
# TYPE ceph_prioritycache:meta_committed_bytes gauge
ceph_prioritycache:meta_committed_bytes{ceph_daemon="osd.0"} 3976200192.0
ceph_prioritycache:meta_committed_bytes{ceph_daemon="osd.1"} 3976200192.0
ceph_prioritycache:meta_committed_bytes{ceph_daemon="osd.2"} 3976200192.0
# HELP ceph_prioritycache:meta_pri0_bytes bytes allocated to pri0
# TYPE ceph_prioritycache:meta_pri0_bytes gauge
ceph_prioritycache:meta_pri0_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri0_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri0_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri10_bytes bytes allocated to pri10
# TYPE ceph_prioritycache:meta_pri10_bytes gauge
ceph_prioritycache:meta_pri10_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri10_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri10_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri11_bytes bytes allocated to pri11
# TYPE ceph_prioritycache:meta_pri11_bytes gauge
ceph_prioritycache:meta_pri11_bytes{ceph_daemon="osd.0"} 3694809112.0
ceph_prioritycache:meta_pri11_bytes{ceph_daemon="osd.1"} 3694589432.0
ceph_prioritycache:meta_pri11_bytes{ceph_daemon="osd.2"} 3694142763.0
# HELP ceph_prioritycache:meta_pri1_bytes bytes allocated to pri1
# TYPE ceph_prioritycache:meta_pri1_bytes gauge
ceph_prioritycache:meta_pri1_bytes{ceph_daemon="osd.0"} 801918.0
ceph_prioritycache:meta_pri1_bytes{ceph_daemon="osd.1"} 822588.0
ceph_prioritycache:meta_pri1_bytes{ceph_daemon="osd.2"} 2175006.0
# HELP ceph_prioritycache:meta_pri2_bytes bytes allocated to pri2
# TYPE ceph_prioritycache:meta_pri2_bytes gauge
ceph_prioritycache:meta_pri2_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri2_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri2_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri3_bytes bytes allocated to pri3
# TYPE ceph_prioritycache:meta_pri3_bytes gauge
ceph_prioritycache:meta_pri3_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri3_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri3_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri4_bytes bytes allocated to pri4
# TYPE ceph_prioritycache:meta_pri4_bytes gauge
ceph_prioritycache:meta_pri4_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri4_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri4_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri5_bytes bytes allocated to pri5
# TYPE ceph_prioritycache:meta_pri5_bytes gauge
ceph_prioritycache:meta_pri5_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri5_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri5_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri6_bytes bytes allocated to pri6
# TYPE ceph_prioritycache:meta_pri6_bytes gauge
ceph_prioritycache:meta_pri6_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri6_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri6_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri7_bytes bytes allocated to pri7
# TYPE ceph_prioritycache:meta_pri7_bytes gauge
ceph_prioritycache:meta_pri7_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri7_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri7_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri8_bytes bytes allocated to pri8
# TYPE ceph_prioritycache:meta_pri8_bytes gauge
ceph_prioritycache:meta_pri8_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri8_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri8_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_pri9_bytes bytes allocated to pri9
# TYPE ceph_prioritycache:meta_pri9_bytes gauge
ceph_prioritycache:meta_pri9_bytes{ceph_daemon="osd.0"} 0.0
ceph_prioritycache:meta_pri9_bytes{ceph_daemon="osd.1"} 0.0
ceph_prioritycache:meta_pri9_bytes{ceph_daemon="osd.2"} 0.0
# HELP ceph_prioritycache:meta_reserved_bytes bytes reserved for future growth.
# TYPE ceph_prioritycache:meta_reserved_bytes gauge
ceph_prioritycache:meta_reserved_bytes{ceph_daemon="osd.0"} 280589162.0
ceph_prioritycache:meta_reserved_bytes{ceph_daemon="osd.1"} 280788172.0
ceph_prioritycache:meta_reserved_bytes{ceph_daemon="osd.2"} 279882423.0
# HELP ceph_objecter_0x557236160fa0_op_active Operations active
# TYPE ceph_objecter_0x557236160fa0_op_active gauge
ceph_objecter_0x557236160fa0_op_active{ceph_daemon="rgw.my.store.a"} 0.0
# HELP ceph_objecter_0x557236160fa0_op_r Read operations
# TYPE ceph_objecter_0x557236160fa0_op_r counter
ceph_objecter_0x557236160fa0_op_r{ceph_daemon="rgw.my.store.a"} 10096.0
# HELP ceph_objecter_0x557236160fa0_op_rmw Read-modify-write operations
# TYPE ceph_objecter_0x557236160fa0_op_rmw counter
ceph_objecter_0x557236160fa0_op_rmw{ceph_daemon="rgw.my.store.a"} 0.0
# HELP ceph_objecter_0x557236160fa0_op_w Write operations
# TYPE ceph_objecter_0x557236160fa0_op_w counter
ceph_objecter_0x557236160fa0_op_w{ceph_daemon="rgw.my.store.a"} 134964.0
# HELP ceph_objecter_0x557236161900_op_active Operations active
# TYPE ceph_objecter_0x557236161900_op_active gauge
ceph_objecter_0x557236161900_op_active{ceph_daemon="rgw.my.store.a"} 0.0
# HELP ceph_objecter_0x557236161900_op_r Read operations
# TYPE ceph_objecter_0x557236161900_op_r counter
ceph_objecter_0x557236161900_op_r{ceph_daemon="rgw.my.store.a"} 0.0
# HELP ceph_objecter_0x557236161900_op_rmw Read-modify-write operations
# TYPE ceph_objecter_0x557236161900_op_rmw counter
ceph_objecter_0x557236161900_op_rmw{ceph_daemon="rgw.my.store.a"} 0.0
# HELP ceph_objecter_0x557236161900_op_w Write operations
# TYPE ceph_objecter_0x557236161900_op_w counter
ceph_objecter_0x557236161900_op_w{ceph_daemon="rgw.my.store.a"} 54482.0
# HELP ceph_rgw_cache_hit Cache hits
# TYPE ceph_rgw_cache_hit counter
ceph_rgw_cache_hit{ceph_daemon="rgw.my.store.a"} 2.0
ceph_rgw_cache_hit{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_cache_miss Cache miss
# TYPE ceph_rgw_cache_miss counter
ceph_rgw_cache_miss{ceph_daemon="rgw.my.store.a"} 508.0
ceph_rgw_cache_miss{ceph_daemon="rgw.my.store.b"} 3.0
# HELP ceph_rgw_failed_req Aborted requests
# TYPE ceph_rgw_failed_req counter
ceph_rgw_failed_req{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_failed_req{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_gc_retire_object GC object retires
# TYPE ceph_rgw_gc_retire_object counter
ceph_rgw_gc_retire_object{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_gc_retire_object{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_get Gets
# TYPE ceph_rgw_get counter
ceph_rgw_get{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_get{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_get_b Size of gets
# TYPE ceph_rgw_get_b counter
ceph_rgw_get_b{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_get_b{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_get_initial_lat_sum Get latency Total
# TYPE ceph_rgw_get_initial_lat_sum counter
ceph_rgw_get_initial_lat_sum{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_get_initial_lat_sum{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_get_initial_lat_count Get latency Count
# TYPE ceph_rgw_get_initial_lat_count counter
ceph_rgw_get_initial_lat_count{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_get_initial_lat_count{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_keystone_token_cache_hit Keystone token cache hits
# TYPE ceph_rgw_keystone_token_cache_hit counter
ceph_rgw_keystone_token_cache_hit{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_keystone_token_cache_hit{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_keystone_token_cache_miss Keystone token cache miss
# TYPE ceph_rgw_keystone_token_cache_miss counter
ceph_rgw_keystone_token_cache_miss{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_keystone_token_cache_miss{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_event_lost Pubsub events lost
# TYPE ceph_rgw_pubsub_event_lost counter
ceph_rgw_pubsub_event_lost{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_event_lost{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_event_triggered Pubsub events with at least one topic
# TYPE ceph_rgw_pubsub_event_triggered counter
ceph_rgw_pubsub_event_triggered{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_event_triggered{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_events Pubsub events in store
# TYPE ceph_rgw_pubsub_events gauge
ceph_rgw_pubsub_events{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_events{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_missing_conf Pubsub events could not be handled because of missing configuration
# TYPE ceph_rgw_pubsub_missing_conf counter
ceph_rgw_pubsub_missing_conf{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_missing_conf{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_push_failed Pubsub events failed to be pushed to an endpoint
# TYPE ceph_rgw_pubsub_push_failed counter
ceph_rgw_pubsub_push_failed{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_push_failed{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_push_ok Pubsub events pushed to an endpoint
# TYPE ceph_rgw_pubsub_push_ok counter
ceph_rgw_pubsub_push_ok{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_push_ok{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_push_pending Pubsub events pending reply from endpoint
# TYPE ceph_rgw_pubsub_push_pending gauge
ceph_rgw_pubsub_push_pending{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_push_pending{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_store_fail Pubsub events failed to be stored
# TYPE ceph_rgw_pubsub_store_fail counter
ceph_rgw_pubsub_store_fail{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_store_fail{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_pubsub_store_ok Pubsub events successfully stored
# TYPE ceph_rgw_pubsub_store_ok counter
ceph_rgw_pubsub_store_ok{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_pubsub_store_ok{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_put Puts
# TYPE ceph_rgw_put counter
ceph_rgw_put{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_put{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_put_b Size of puts
# TYPE ceph_rgw_put_b counter
ceph_rgw_put_b{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_put_b{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_put_initial_lat_sum Put latency Total
# TYPE ceph_rgw_put_initial_lat_sum counter
ceph_rgw_put_initial_lat_sum{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_put_initial_lat_sum{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_put_initial_lat_count Put latency Count
# TYPE ceph_rgw_put_initial_lat_count counter
ceph_rgw_put_initial_lat_count{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_put_initial_lat_count{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_qactive Active requests queue
# TYPE ceph_rgw_qactive gauge
ceph_rgw_qactive{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_qactive{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_qlen Queue length
# TYPE ceph_rgw_qlen gauge
ceph_rgw_qlen{ceph_daemon="rgw.my.store.a"} 0.0
ceph_rgw_qlen{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_rgw_req Requests
# TYPE ceph_rgw_req counter
ceph_rgw_req{ceph_daemon="rgw.my.store.a"} 27246.0
ceph_rgw_req{ceph_daemon="rgw.my.store.b"} 27243.0
# HELP ceph_objecter_0x561afed1af00_op_active Operations active
# TYPE ceph_objecter_0x561afed1af00_op_active gauge
ceph_objecter_0x561afed1af00_op_active{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_0x561afed1af00_op_r Read operations
# TYPE ceph_objecter_0x561afed1af00_op_r counter
ceph_objecter_0x561afed1af00_op_r{ceph_daemon="rgw.my.store.b"} 10080.0
# HELP ceph_objecter_0x561afed1af00_op_rmw Read-modify-write operations
# TYPE ceph_objecter_0x561afed1af00_op_rmw counter
ceph_objecter_0x561afed1af00_op_rmw{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_0x561afed1af00_op_w Write operations
# TYPE ceph_objecter_0x561afed1af00_op_w counter
ceph_objecter_0x561afed1af00_op_w{ceph_daemon="rgw.my.store.b"} 134932.0
# HELP ceph_objecter_0x561afed1ba40_op_active Operations active
# TYPE ceph_objecter_0x561afed1ba40_op_active gauge
ceph_objecter_0x561afed1ba40_op_active{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_0x561afed1ba40_op_r Read operations
# TYPE ceph_objecter_0x561afed1ba40_op_r counter
ceph_objecter_0x561afed1ba40_op_r{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_0x561afed1ba40_op_rmw Read-modify-write operations
# TYPE ceph_objecter_0x561afed1ba40_op_rmw counter
ceph_objecter_0x561afed1ba40_op_rmw{ceph_daemon="rgw.my.store.b"} 0.0
# HELP ceph_objecter_0x561afed1ba40_op_w Write operations
# TYPE ceph_objecter_0x561afed1ba40_op_w counter
ceph_objecter_0x561afed1ba40_op_w{ceph_daemon="rgw.my.store.b"} 54478.0