Feed each machine's status in an Elasticsearch cluster to Cacti
#!/usr/bin/env python
# Script to report per-host data from an Elasticsearch cluster to Cacti
# adaam 2014.07.15
import urllib2
import simplejson
import getopt
import os, sys, datetime, re
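# Note: this is Python 2 code (urllib2, print statements, dict.iteritems); the
# simplejson package must be installed, although the stdlib json module would
# also work on Python 2.6+.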
# Default host is localhost,
# but the Elasticsearch host may be a different IP (see the -h option below).
host = '127.0.0.1'
port = '9200'
# Build timestamps for now and one hour ago; they name a temp file that caches
# the stats response so the server is not queried too many times.
# The current setting removes temp files after one hour (not tested yet).
now = datetime.datetime.now()
old = datetime.datetime.now() - datetime.timedelta(hours=1)
datetime = now.strftime("%Y%m%d_%H%M")
olddatetime = old.strftime("%Y%m%d_%H")
#print datetime
#print olddatetime
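# Example values the two strftime calls above produce for a run at 2014-12-19 07:19
# (illustrative only):
#   datetime    -> "20141219_0719"   (stamps the cache file written below)
#   olddatetime -> "20141219_06"     (prefix of last hour's cache files to delete)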
# Get the target host and port from the command line.
try:
    opts, args = getopt.getopt(sys.argv[1:],"h:p:",["host","port"])
except getopt.GetoptError:
    print sys.argv[0],' -h <target host> -p <port number>'
    sys.exit(2)
for opt,arg in opts:
    if opt == '-h':
        host = arg
    elif opt == '-p':
        port = arg
#print 'host: ', host
#print 'port: ', port
#print 'http://'+host+':'+port
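# Example invocation, e.g. from a Cacti data input method (script name, host and
# port below are placeholders):
#   python es_node_stats.py -h 10.0.0.5 -p 9200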
# Query cluster health to learn the cluster name, then check below whether a cached
# stats file already exists; if it does, read it instead of querying the server again.
c = urllib2.urlopen('http://'+host+':'+port+'/_cluster/health',timeout=20)
c_data = c.read()
cluster_data = simplejson.loads(c_data)
cluster_name = cluster_data['cluster_name']
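# The /_cluster/health response is a small JSON document, roughly (abridged,
# values illustrative):
#   {"cluster_name": "mycluster", "status": "green", "number_of_nodes": 3, ...}
# Only cluster_name is used here, so cache files are kept per cluster.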
# Remove cache files from one hour ago in the temp folder.
filelist = [ f for f in os.listdir("/tmp/cacti") if f.startswith('cacti_els_'+cluster_name+olddatetime) ]
for f in filelist:
    os.chdir('/tmp/cacti')
    os.remove(f)
#if not os.path.exists('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log'):
if not os.path.exists('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log'):
    f = open('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log', 'w')
    #u = urllib2.urlopen('http://'+host+':'+port+'/_nodes/stats/')
    u = urllib2.urlopen('http://'+host+':'+port+'/_nodes/stats?all=true&human=false',timeout=20)
    json = u.read()
    f.write(json)
    f.close()
elif os.stat('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log').st_size == 0 :
    f = open('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log', 'w')
    #u = urllib2.urlopen('http://'+host+':'+port+'/_nodes/stats/')
    u = urllib2.urlopen('http://'+host+':'+port+'/_nodes/stats?all=true&human=false',timeout=20)
    json = u.read()
    f.write(json)
    f.close()
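# Net effect of the caching above: node stats are fetched from Elasticsearch at most
# once per minute per cluster (the file name carries a %Y%m%d_%H%M stamp), and files
# stamped with the hour exactly one hour back are deleted. Files from earlier hours
# are not matched, so they linger if the script was not run in the following hour.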
# Read the cached file into a string, ready for JSON parsing.
with open('/tmp/cacti/cacti_els_'+cluster_name+datetime+'.log', 'r') as f:
    j = f.read()
#print 'kekeke',j
# use simplejson to parse string
node_stats = simplejson.loads(j)
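# node_stats mirrors the /_nodes/stats response, roughly (abridged, values
# illustrative):
#   {"cluster_name": "...", "nodes": {"<node_id>": {"transport_address": "inet[/10.0.0.5:9300]",
#       "host": "...", "indices": {...}, "process": {...}, "jvm": {...}, ...}}}
# Node ids are generated by Elasticsearch, hence the ip/host/hostname mapping built below.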
#print k['nodes']
# Prepare two empty dicts:
# stats stores the final result,
# mapping stores the ip/host/hostname mapping to Elasticsearch's own node id.
stats = {}
mapping = {}
k = node_stats
for key, value in k['nodes'].iteritems():
    m = re.search(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}',k['nodes'][key]['transport_address'])
    mapping[m.group(0)] = key
    if 'hostname' in k['nodes'][key]:
        mapping[k['nodes'][key]['hostname']] = key
    if 'host' in k['nodes'][key]:
        mapping[k['nodes'][key]['host']] = key
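# After the loop, mapping ties every address or name a node is known by to its internal
# id, e.g. (illustrative values): {'10.0.0.5': 'xK3AbCdQrS', 'es-node-1': 'xK3AbCdQrS'},
# so the -h argument can be looked up whether it was given as an IP or a hostname.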
# print mapping
#for key, value in node_stats['nodes'].iteritems():
# print node_stats['nodes'][key]['indices']['docs']
if host in mapping:
    key = mapping[host]
    # print key
    stats['indeces_docs_count'] = node_stats['nodes'][key]['indices']['docs']['count']
    stats['indeces_docs_deleted'] = node_stats['nodes'][key]['indices']['docs']['deleted']
    stats['indeces_store_size'] = node_stats['nodes'][key]['indices']['store']['size_in_bytes']
    stats['indexing_index_total'] = node_stats['nodes'][key]['indices']['indexing']['index_total']
    stats['indexing_index_time'] = node_stats['nodes'][key]['indices']['indexing']['index_time_in_millis']
    stats['indexing_delete_total'] = node_stats['nodes'][key]['indices']['indexing']['delete_total']
    stats['indexing_delete_time'] = node_stats['nodes'][key]['indices']['indexing']['delete_time_in_millis']
    stats['get_total'] = node_stats['nodes'][key]['indices']['get']['total']
    stats['get_time'] = node_stats['nodes'][key]['indices']['get']['time_in_millis']
    stats['get_exists_total'] = node_stats['nodes'][key]['indices']['get']['exists_total']
    stats['get_exists_time'] = node_stats['nodes'][key]['indices']['get']['exists_time_in_millis']
    stats['get_missing_total'] = node_stats['nodes'][key]['indices']['get']['missing_total']
    stats['get_missing_time'] = node_stats['nodes'][key]['indices']['get']['missing_time_in_millis']
    stats['search_query_total'] = node_stats['nodes'][key]['indices']['search']['query_total']
    stats['search_query_time'] = node_stats['nodes'][key]['indices']['search']['query_time_in_millis']
    stats['search_fetch_total'] = node_stats['nodes'][key]['indices']['search']['fetch_total']
    stats['search_fetch_time'] = node_stats['nodes'][key]['indices']['search']['fetch_time_in_millis']
    stats['merges_total'] = node_stats['nodes'][key]['indices']['merges']['total']
    stats['merges_time'] = node_stats['nodes'][key]['indices']['merges']['total_time_in_millis']
    stats['merges_total_docs'] = node_stats['nodes'][key]['indices']['merges']['total_docs']
    stats['merges_total_size'] = node_stats['nodes'][key]['indices']['merges']['total_size_in_bytes']
    stats['refresh_total'] = node_stats['nodes'][key]['indices']['refresh']['total']
    stats['refresh_total_time'] = node_stats['nodes'][key]['indices']['refresh']['total_time_in_millis']
    stats['flush_total'] = node_stats['nodes'][key]['indices']['flush']['total']
    stats['flush_total_time'] = node_stats['nodes'][key]['indices']['flush']['total_time_in_millis']
    stats['warmer_total'] = node_stats['nodes'][key]['indices']['warmer']['total']
    stats['warmer_total_time'] = node_stats['nodes'][key]['indices']['warmer']['total_time_in_millis']
    stats['filter_cache_mem_size'] = node_stats['nodes'][key]['indices']['filter_cache']['memory_size_in_bytes']
    stats['id_cache_mem_size'] = node_stats['nodes'][key]['indices']['id_cache']['memory_size_in_bytes']
    stats['fielddata_mem_size'] = node_stats['nodes'][key]['indices']['fielddata']['memory_size_in_bytes']
    stats['completion_size'] = node_stats['nodes'][key]['indices']['completion']['size_in_bytes']
    if 'segments' in node_stats['nodes'][key]['indices']:
        stats['segments'] = node_stats['nodes'][key]['indices']['segments']['count']
    stats['process_open_files'] = node_stats['nodes'][key]['process']['open_file_descriptors']
    stats['process_cpu_percent'] = node_stats['nodes'][key]['process']['cpu']['percent']
    stats['process_cpu_sys'] = node_stats['nodes'][key]['process']['cpu']['sys_in_millis']
    stats['process_cpu_user'] = node_stats['nodes'][key]['process']['cpu']['user_in_millis']
    stats['process_cpu_total'] = node_stats['nodes'][key]['process']['cpu']['total_in_millis']
    stats['process_mem_resident'] = node_stats['nodes'][key]['process']['mem']['resident_in_bytes']
    stats['process_mem_share'] = node_stats['nodes'][key]['process']['mem']['share_in_bytes']
    stats['process_mem_virtual'] = node_stats['nodes'][key]['process']['mem']['total_virtual_in_bytes']
    stats['jvm_uptime'] = node_stats['nodes'][key]['jvm']['uptime_in_millis']
    stats['jvm_mem_heap_used'] = node_stats['nodes'][key]['jvm']['mem']['heap_used_in_bytes']
    stats['jvm_mem_heap_committed'] = node_stats['nodes'][key]['jvm']['mem']['heap_committed_in_bytes']
    if 'heap_max_in_bytes' in node_stats['nodes'][key]['jvm']['mem']:
        stats['jvm_mem_heap_max'] = node_stats['nodes'][key]['jvm']['mem']['heap_max_in_bytes']
    stats['jvm_mem_non_heap_used'] = node_stats['nodes'][key]['jvm']['mem']['non_heap_used_in_bytes']
    stats['jvm_mem_non_heap_committed'] = node_stats['nodes'][key]['jvm']['mem']['non_heap_committed_in_bytes']
    stats['jvm_threads'] = node_stats['nodes'][key]['jvm']['threads']['count']
#print stats['jvm_threads']
# Build the output string for Cacti: space-separated name:value pairs.
output_str = ""
for key, value in stats.items():
    # print str(key) + ":" + str(value)
    output_str = output_str + str(key) + ":" + str(value) + " "
#stri = str(stats)
#print stri
print output_str
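# Cacti data input methods that return several fields parse a single line of
# space-separated name:value pairs, which is what the print above emits; a sample
# line (illustrative values):
#   indeces_docs_count:123456 indeces_docs_deleted:78 search_query_total:9001 ...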