@chicagobuss
Last active November 6, 2019 23:20
for helping customers use our metrics firehose
#!/usr/bin/env python3
#
# cc-viz.py
#
# A hastily thrown-together collection of functions that gets Confluent Cloud metrics into Datadog, Graphite, or Prometheus (or any combination of those)
#
GRAPHITE_HOST = '10.1.2.3'
GRAPHITE_PORT = 2003
GRAPHITE_METRIC_PREFIX = 'confluent.cloud.kafka'
HOSTNAME = 'your_hostname_here'
PROM_PUSH_GATEWAY_HOST = '10.1.2.3'
PROM_PUSH_GATEWAY_PORT = '9091'
DD_API_KEY = 'YOUR_DATADOG_API_KEY_HERE'
CCLOUD_KAFKA_BOOTSTRAP = 'pkc-emp1e.us-west-2.aws.confluent.cloud:9092'
CCLOUD_API_KEY = 'YOUR_CONFLUENT_CLOUD_API_KEY'
CCLOUD_API_SECRET = 'YOUR_CONFLUENT_CLOUD_API_SECRET'
CCLOUD_CONSUMER_GROUP = 'metrics-viz'
import sys
import getopt
import json
import logging
import requests
import graphyte
from confluent_kafka import Consumer, KafkaException, KafkaError
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from pprint import pformat
# Prometheus push gateway prep
prom_registry = CollectorRegistry()
def stats_cb(stats_json_str):
    stats_json = json.loads(stats_json_str)
    print('\nKAFKA Stats: {}\n'.format(pformat(stats_json)))
def print_usage_and_exit(program_name):
    sys.stderr.write('Usage: %s [options..] <bootstrap-brokers> <api_key> <api_secret> <group> <topic1> <topic2> ..\n' % program_name)
    options = '''
Options:
  -T <intvl>  Enable client statistics at specified interval (ms)
'''
    sys.stderr.write(options)
    sys.exit(1)
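# A hypothetical invocation matching the usage string above (note that the main
# block below currently reads the CCLOUD_* constants instead of the positional args):
#   ./cc-viz.py -T 60000 pkc-emp1e.us-west-2.aws.confluent.cloud:9092 <api_key> <api_secret> metrics-viz metrics.v1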
def get_dd_series(metric_name, value, ts, tags):
    tag_strings = []
    for tag in tags.keys():
        tag_strings.append('%s:%s' % (tag, tags[tag]))
    series = {
        'metric': metric_name,
        'points': [[ts, value]],
        'type': 'gauge',
        'host': HOSTNAME,
        'tags': tag_strings
    }
    return series
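# For example (hypothetical values), get_dd_series('bytes_in', 1024.0, 1573080000, {'tenant': 'lkc-abc12'})
# returns:
#   {'metric': 'bytes_in', 'points': [[1573080000, 1024.0]], 'type': 'gauge',
#    'host': HOSTNAME, 'tags': ['tenant:lkc-abc12']}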
# for posting a metric as a gauge to datadog
def dd_post_gauge(dd_payload):
    response = requests.post(
        'https://api.datadoghq.com/api/v1/series?api_key=%s' % DD_API_KEY,
        data=json.dumps(dd_payload)
    )
    if response.text == '{"status": "ok"}':
        # print("# posted payload successfully - 1st metric: %s" % dd_payload['series'][0])
        pass
    else:
        print("# problem posting payload - 1st metric: %s - datadog response: %s" % (dd_payload['series'][0], response.text))
# for graphite-style flat metrics
def flatten(metric):
    m_name = metric.get('name')
    m_tags = metric.get('tags')
    if m_tags:
        try:
            return '%s.%s.%s.user_%s.%s_request.%s' % (
                m_name,
                m_tags.get('tenant'),
                m_tags.get('source'),
                m_tags.get('user'),
                m_tags.get('request_type'),
                m_tags.get('unit')
            )
        except Exception:
            print("Unable to flatten metric %s with tags %s" % (m_name, m_tags))
            return '%s.%s' % (m_name, '-'.join(['%s:%s' % (k, v) for k, v in m_tags.items()]))
    return m_name
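# For example (hypothetical metric name and tag values), a metric named 'sent_bytes' with tags
# {'tenant': 'lkc-abc12', 'source': 'broker', 'user': 'u-42', 'request_type': 'fetch', 'unit': 'B'}
# flattens to:
#   'sent_bytes.lkc-abc12.broker.user_u-42.fetch_request.B'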
if __name__ == '__main__':
    optlist, argv = getopt.getopt(sys.argv[1:], 'T:')
    # CLI positional args are commented out in favor of the constants at the top
    # if len(argv) < 3:
    #     print_usage_and_exit(sys.argv[0])
    # brokers = argv[0]
    brokers = CCLOUD_KAFKA_BOOTSTRAP
    # api_key = argv[1]
    api_key = CCLOUD_API_KEY
    # api_secret = argv[2]
    api_secret = CCLOUD_API_SECRET
    # group = argv[3]
    group = CCLOUD_CONSUMER_GROUP
    # topics = argv[4:]
    topics = ['metrics.v1']
    # see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md for more configs
    conf = {
        'bootstrap.servers': brokers,
        'sasl.username': api_key,
        'sasl.password': api_secret,
        'group.id': group,
        'sasl.mechanisms': 'PLAIN',
        'request.timeout.ms': 20000,
        'session.timeout.ms': 6000,
        'security.protocol': 'SASL_SSL',
        # If you don't have this file yet, download it from here: https://curl.haxx.se/ca/cacert.pem
        'ssl.ca.location': '/usr/local/etc/openssl/cacert.pem',
        'default.topic.config': {'auto.offset.reset': 'largest'}
    }
    # Note: 'auto.offset.reset' only applies when the group has no committed offsets
    # yet; once offsets are committed, the consumer resumes where it left off automatically.
    # to start at current messages (if you haven't ever consumed with this group id yet):
    #     'default.topic.config': {'auto.offset.reset': 'largest'}
    # to start at the beginning:
    #     'default.topic.config': {'auto.offset.reset': 'smallest'}
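    # Assumption: on newer confluent-kafka-python releases, 'default.topic.config'
    # is deprecated and the same setting can go at the top level instead, e.g.:
    #     conf['auto.offset.reset'] = 'earliest'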
    # Check to see if -T option exists
    for opt in optlist:
        if opt[0] != '-T':
            continue
        try:
            intval = int(opt[1])
        except ValueError:
            sys.stderr.write("Invalid option value for -T: %s\n" % opt[1])
            sys.exit(1)
        if intval <= 0:
            sys.stderr.write("-T option value needs to be larger than zero: %s\n" % opt[1])
            sys.exit(1)
        conf['stats_cb'] = stats_cb
        conf['statistics.interval.ms'] = intval
    # Create logger for consumer (logs will be emitted when poll() is called)
    logger = logging.getLogger('consumer')
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
    logger.addHandler(handler)
    # Create Consumer instance
    c = Consumer(conf, logger=logger)

    def print_assignment(consumer, partitions):
        print('Assignment:', partitions)

    # Subscribe to topics
    c.subscribe(topics, on_assign=print_assignment)
    # prepare graphyte
    graphyte.init(GRAPHITE_HOST, port=GRAPHITE_PORT, prefix=GRAPHITE_METRIC_PREFIX, protocol='udp')
    # Read messages from Kafka, print to stdout
    try:
        while True:
            msg = c.poll(timeout=1.0)
            if msg is None:
                continue
            if msg.error():
                # Error or event
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    # End of partition event
                    sys.stderr.write('%% %s [%d] reached end at offset %d\n' %
                                     (msg.topic(), msg.partition(), msg.offset()))
                else:
                    # Error
                    raise KafkaException(msg.error())
            else:
                # Proper message
                # sys.stderr.write('%% %s [%d] at offset %d with key %s:\n' %
                #                  (msg.topic(), msg.partition(), msg.offset(), str(msg.key())))
                # print(msg.value())
                msg_data = json.loads(msg.value().decode('utf-8'))
                # print(json.dumps(msg_data))
                m_name = msg_data.get('name')
                m_value = msg_data.get('value')
                m_tags = msg_data.get('tags')
                if m_name and m_value:
                    # sending to graphite with tags is still problematic, so we build an
                    # opinionated hierarchical metric name instead
                    graphyte.send(flatten(msg_data), m_value)
                    # if m_tags:
                    #     # g = Gauge(m_name, 'description for %s' % m_name, registry=prom_registry,
                    #     #           labelnames=m_tags.keys(), labelvalues=m_tags.values())
                    #     # graphyte.send(m_name, m_value, tags=m_tags)
                    #
                    #     # Use this section if you want to send our stats straight to datadog
                    #     dd_series = [get_dd_series('%s.%s' % (GRAPHITE_METRIC_PREFIX, m_name), m_value, msg_data.get('timestamp'), m_tags)]
                    #     dd_payload = {'series': dd_series}
                    #     dd_post_gauge(dd_payload)
                    # else:
                    #     # g = Gauge(m_name, 'description for %s' % m_name, registry=prom_registry)
                    #     graphyte.send(m_name, m_value)
                    # print("pushing g %s to gateway" % str(g))
                    # push_to_gateway('%s:%s' % (PROM_PUSH_GATEWAY_HOST, PROM_PUSH_GATEWAY_PORT), job='batchA', registry=prom_registry)
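                    # A minimal, hypothetical sketch of the push-gateway path the
                    # commented-out code above was aiming for. prometheus_client Gauge
                    # names can't contain dots, and re-registering the same name raises,
                    # so a real version would cache one Gauge per metric name:
                    #     g = Gauge(m_name.replace('.', '_').replace('/', '_'),
                    #               'gauge for %s' % m_name,
                    #               labelnames=list(m_tags.keys()), registry=prom_registry)
                    #     g.labels(**m_tags).set(m_value)
                    #     push_to_gateway('%s:%s' % (PROM_PUSH_GATEWAY_HOST, PROM_PUSH_GATEWAY_PORT),
                    #                     job='cc-viz', registry=prom_registry)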
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')
    finally:
        # Close down consumer to commit final offsets.
        c.close()