Skip to content

Instantly share code, notes, and snippets.

@mulbc
Created September 4, 2019 08:58
Show Gist options
  • Save mulbc/ab70e912e582b9d147811762d5ed6db1 to your computer and use it in GitHub Desktop.
Save mulbc/ab70e912e582b9d147811762d5ed6db1 to your computer and use it in GitHub Desktop.
Cosbench misc parsers
#!/usr/bin/env python3
# This program parses the cosbench run-history CSV file and identifies Cosbench test start and stop times.
# It then connects to a Prometheus TSDB and tries to find the ceph_cluster_total_objects at these times.
# The results are then written to stdout in CSV format
# Make sure to update the Prometheus server's address below
import csv
import datetime
import sys

import requests

# Prometheus instant-query endpoint; update the address for your cluster.
PROMETHEUS_URL = 'http://192.168.120.9:9090/api/v1/query'
EPOCH = datetime.datetime(1970, 1, 1)


def _to_unix_seconds(timestamp_text):
    """Convert a 'YYYY-mm-dd HH:MM:SS' string to a unix timestamp in seconds.

    Prometheus' query API accepts timestamps in (fractional) seconds, so no
    millisecond conversion is needed here.
    """
    parsed = datetime.datetime.strptime(timestamp_text, '%Y-%m-%d %H:%M:%S')
    return int((parsed - EPOCH).total_seconds())


def _object_count_at(timestamp):
    """Query Prometheus for ceph_cluster_total_objects at *timestamp*.

    Returns the metric value as reported by Prometheus (a string), or 0 when
    the request fails or the result set is empty — best effort by design.
    """
    payload = {
        'query': 'ceph_cluster_total_objects',
        'time': timestamp,
    }
    r = requests.get(PROMETHEUS_URL, params=payload)
    if r.status_code == requests.codes.ok:
        try:
            return r.json()['data']['result'][0]['value'][1]
        except (KeyError, IndexError):
            # Query succeeded but returned no sample for this instant.
            pass
    return 0


print('workload ID;workload Description;objects before workload;objects after workload')
# BUG FIX: the original opened sys.argv[0] (the script's own path); the
# run-history CSV is the first command-line argument, sys.argv[1].
with open(sys.argv[1]) as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        # Skip the header row and any row missing a start or stop timestamp.
        if line_count > 0 and len(row[3]) > 0 and len(row[4]) > 0:
            start_time = _to_unix_seconds(row[3])
            stop_time = _to_unix_seconds(row[4])
            print('{};{};{};{}'.format(
                row[0], row[1],
                _object_count_at(start_time),
                _object_count_at(stop_time)))
        line_count += 1
#!/usr/bin/env python
# This program processes the cosbench run-history CSV file and
# inserts Grafana annotations where necessary
# This marks cosbench start and stop times in Grafana graphs
# Make sure to filter for the 'bench' tag in Grafana to see the annotations in your dashboards
import csv
import datetime
import sys

import requests

token = "eyJrIjoiUnNkYmZyTFE4MlpUNDNHYWlZVVFnYmNzQ3pWdHJiNTMiLCJuIjoiYW5ub3RhdG9yIiwiaWQiOjF9" # new Grafana
# token = "eyJrIjoiaHJ0NmZEZXBvc3IxMkhYQWN6c1ZuYTJCZDNsbFdqMFgiLCJuIjoiYW5ub3RhdG9yIiwiaWQiOjF9" # old Grafana
headers = {"Authorization": "Bearer {}".format(token)}
endpoint = "http://192.168.120.9:4000/api/annotations"
get_params = {"type": "annotation"}
# NOTE(review): the fetched annotations are never used below — kept so the
# request (and any auth error it surfaces) still happens; confirm intent.
annotations = requests.get(endpoint, headers=headers, params=get_params).json()

EPOCH = datetime.datetime(1970, 1, 1)

# BUG FIX: the original opened sys.argv[0] (the script's own path); the
# run-history CSV is the first command-line argument, sys.argv[1].
with open(sys.argv[1]) as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        # Skip the header row and any row missing a start or stop timestamp.
        if line_count > 0 and len(row[3]) > 0 and len(row[4]) > 0:
            # Calculate unix timestamp and convert to ms (Grafana annotations
            # take epoch milliseconds).
            start_time = int((datetime.datetime.strptime(row[3], '%Y-%m-%d %H:%M:%S') - EPOCH).total_seconds() * 1000)
            stop_time = int((datetime.datetime.strptime(row[4], '%Y-%m-%d %H:%M:%S') - EPOCH).total_seconds() * 1000)
            # BUG FIX: original was print('...').format(...) — calling .format
            # on print()'s return value (None) raises AttributeError on Py3.
            print('{} - {} Name: {}-{}'.format(start_time, stop_time, row[0], row[1]))
            start_data = {"time": start_time, "text": 'START {}-{}'.format(row[0], row[1]), "tags": ["gosbench", "bench"] }
            end_data = {"time": stop_time, "text": 'STOP {}-{}'.format(row[0], row[1]), "tags": ["gosbench", "bench"] }
            start = requests.post(endpoint, headers=headers, data=start_data)
            stop = requests.post(endpoint, headers=headers, data=end_data)
            # requests.Response is falsy for 4xx/5xx status codes, so this
            # aborts on the first failed annotation POST.
            if not start or not stop:
                print("Issue with generating annotation for {}".format(row[0]))
                sys.exit(1)
        line_count += 1
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment