Skip to content

Instantly share code, notes, and snippets.

@mulbc
Created September 4, 2019 08:50
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save mulbc/a6bba6caa33a01121cbaf785daab7fa2 to your computer and use it in GitHub Desktop.
RGW textfile collector
# Systemd service for the Ceph RGW Prometheus textfile exporter.
# This unit is meant to be activated by the companion .timer unit every
# 30 minutes, so it must run once per activation and then exit.
[Unit]
Description=Ceph RGW Prometheus Exporter
After=docker.service

[Service]
# Type=oneshot: the exporter writes its textfile and exits. The original
# Restart=always / RestartSec=90s made systemd re-run the script every
# ~90 seconds regardless of the timer, defeating the 30-minute schedule.
Type=oneshot
EnvironmentFile=-/etc/environment
ExecStart=/usr/local/bin/python3 /usr/bin/rgw_exporter.py
TimeoutStartSec=300
TimeoutStopSec=15

[Install]
WantedBy=multi-user.target
# Timer unit that periodically activates the matching exporter service.
[Unit]
Description=Run RGW Prometheus exporter every 30 minutes
[Timer]
# Every 30 minutes
OnCalendar=*:0/30
[Install]
WantedBy=timers.target
#!/usr/bin/env python3
# This script reads in information from the Ceph RGW and exposes it as a textfile to the node-exporter
# at /etc/prometheus/node-exporter/radosgw.prom
# Please ensure that you've updated the user credentials below and that the user has sufficient permissions
# Also ensure that your node-exporter will expect textfiles in /etc/prometheus/node-exporter/
from prometheus_client import CollectorRegistry, write_to_textfile, Gauge, Info
from rgwadmin import RGWAdmin, RGWUser
import json
import random
import requests
import shlex
import subprocess
import sys
import time
def parse_bucket_info(bucket_info):
    """Export metadata and usage gauges for a single bucket.

    Records the bucket's Info metric (index type, owner, placement rule)
    and its size / actual size / object-count gauges, all labelled by
    bucket name and id. Buckets that have never held objects lack the
    ``usage['rgw.main']`` section; the swallowed KeyError makes such
    buckets silently contribute no gauges (deliberate best-effort).
    """
    try:
        bucket = bucket_info['bucket']
        bucket_id = bucket_info['id']
        b_info.labels(name=bucket, id=bucket_id).info({
            'index_type': bucket_info['index_type'],
            'owner': bucket_info['owner'],
            'placement_rule': bucket_info['placement_rule'],
        })
        usage = bucket_info['usage']['rgw.main']
        b_size.labels(name=bucket, id=bucket_id).set(usage['size'])
        b_actual_size.labels(name=bucket, id=bucket_id).set(usage['size_actual'])
        b_num_objects.labels(name=bucket, id=bucket_id).set(usage['num_objects'])
    except KeyError:
        # Missing keys (e.g. empty bucket without 'rgw.main') -> skip bucket.
        pass
def parse_extended_bucket_info(bucket_extended_info):
    """Export shard-related gauges from a ``bucket.instance`` metadata record.

    Reads ``data.bucket_info`` out of the metadata reply and records the
    shard count and shard hash type, labelled by bucket name and id.
    A missing shard field simply skips the gauges for that bucket.
    """
    info = bucket_extended_info['data']['bucket_info']
    try:
        labels = {
            'name': info['bucket']['name'],
            'id': info['bucket']['bucket_id'],
        }
        b_num_shards.labels(**labels).set(info['num_shards'])
        b_bi_shard_hash_type.labels(**labels).set(info['bi_shard_hash_type'])
    except KeyError:
        # Shard fields can be absent; contribute no gauges in that case.
        pass
if __name__ == '__main__':
    # Registry that accumulates every metric before the single textfile write.
    reg = CollectorRegistry()
    b_info = Info('radosgw_bucket', 'Metadata of Bucket', ['name', 'id'], registry=reg)
    b_size = Gauge('radosgw_bucket_size', 'Bucket size in Bytes', ['name', 'id'], registry=reg)
    b_actual_size = Gauge('radosgw_bucket_actual_size', 'Actual bucket size in Bytes (including headers)', ['name', 'id'], registry=reg)
    b_num_objects = Gauge('radosgw_bucket_number_of_objects', 'Number of objects in the bucket', ['name', 'id'], registry=reg)
    b_num_shards = Gauge('radosgw_bucket_number_of_shards', 'Number of shards in the bucket', ['name', 'id'], registry=reg)
    b_bi_shard_hash_type = Gauge('radosgw_bucket_shard_hash_type', 'Bucket shard hash type', ['name', 'id'], registry=reg)
    # Configure user with the right permissions!
    # radosgw-admin caps add --caps "metadata=read; usage=read; buckets=read; users=read" --uid <user>
    # NOTE(review): credentials are hard-coded; consider sourcing them from the
    # EnvironmentFile the systemd unit already loads.
    rgw = RGWAdmin(access_key='S3user1', secret_key='S3user1key', server='192.168.170.22:8080', secure=False)
    # Get list of bucket names and process metadata of one bucket after the other.
    for bucket_name in rgw.get_buckets():
        try:
            bucket_info = rgw.get_bucket(bucket=bucket_name)
            bucket_extended_info = rgw.get_metadata(
                metadata_type='bucket.instance',
                key='{}:{}'.format(bucket_info['bucket'], bucket_info['id']))
        except requests.exceptions.ConnectionError:
            print('Timeout when fetching bucket {}'.format(bucket_name))
            # BUG FIX: originally fell through and parsed the *previous*
            # bucket's data (or raised NameError on the very first bucket).
            continue
        parse_bucket_info(bucket_info)
        parse_extended_bucket_info(bucket_extended_info)
    # Finished collecting information - write result to file
    write_to_textfile('/etc/prometheus/node-exporter/radosgw.prom', reg)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment