Skip to content

Instantly share code, notes, and snippets.

@Raboo
Forked from macros/gist:553c2ef4d4b0594154f5
Last active October 27, 2015 10:30
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Raboo/27afef54b023b69b12ff to your computer and use it in GitHub Desktop.
haproxy multi socket gmetric
#!/usr/bin/ruby
# Poll every haproxy stats socket and ship the metrics to ganglia.
require 'socket'

# Gem::Version compares "1.10" correctly where a plain string
# comparison against "1.9" would not.
abort('Upgrade ruby or die...') if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('1.9')

pidfile = '/var/run/haproxy_gmetric.pid'

# Detach from the terminal and record our pid so later invocations can
# detect an already-running instance.
daemonize = lambda do
  Process.daemon
  File.open(pidfile, 'w') { |file| file.write(Process.pid) }
end

if File.exist?(pidfile)
  pid = File.read(pidfile).to_i
  begin
    # Signal 0 performs no action; it only checks whether the process exists.
    Process.kill(0, pid)
    abort("appears to be running with pid #{pid}, exiting...")
  rescue Errno::EPERM
    abort("No permission to query #{pid}!")
  rescue Errno::ESRCH
    puts "#{pid} is NOT running, daemonizing..."
    daemonize.call
  rescue
    abort("Unable to determine status for #{pid} : #{$!}")
  end
else
  puts "no existing pidfile, daemonizing..."
  daemonize.call
end
# haproxy "show info" keys we collect, mapped to
# [ganglia metric name, units, type]. Entries whose third element is
# 'counter' are converted to per-interval deltas downstream; entries
# without a type are reported as-is (gauges).
PROCESS_METRICS = {
  'Uptime_sec' => %w(uptime seconds),
  'ConnRate' => %w(connection_rate connections rate),
  'SessRate' => %w(session_rate sessions rate),
  'SslFrontendKeyRate' => %w(sslkey_rate lookups rate),
  'SslCacheLookups' => %w(sslcache_lookups lookups counter),
  'SslCacheMisses' => %w(sslcache_misses misses counter)
}.freeze
# haproxy "show stat" frontend CSV fields we collect, mapped to
# [ganglia metric name, units, type]. 'counter' values are converted to
# per-interval deltas downstream.
FRONTEND_METRICS = {
  'scur' => %w(sessions sessions rate),
  'bin' => %w(bytes_in bytes counter),
  'bout' => %w(bytes_out bytes counter),
  'dreq' => %w(denied_request denys counter),
  'ereq' => %w(request_error errors counter),
  'req_rate' => %w(requests requests rate)
}.freeze
# haproxy "show stat" backend CSV fields we collect, mapped to
# [ganglia metric name, units, type]. 'counter' values are converted to
# per-interval deltas downstream.
BACKEND_METRICS = {
  'qcur' => %w(queued_requests requests rate),
  'scur' => %w(sessions sessions rate),
  'bin' => %w(bytes_in bytes counter),
  'bout' => %w(bytes_out bytes counter),
  'econ' => %w(connection_error errors counter),
  'eresp' => %w(response_error errors counter),
  'wretr' => %w(connection_retries retries counter),
  'rtime' => %w(response_time ms rate),
  'ctime' => %w(connect_time ms rate)
}.freeze
# Number of haproxy processes (nbproc) and therefore stats sockets to
# poll. Defaults to 8; override with the HAPROXY_NPROC environment
# variable when the local haproxy runs a different process count.
num_procs = (ENV['HAPROXY_NPROC'] || 8).to_i
# Previous polling cycle's per-process stats, keyed by socket index;
# used by aggregate_stats to compute counter deltas. nil until the
# first successful poll.
$last_update = nil
# Read the combined "show info; show stat" output from the per-process
# haproxy stats socket identified by socket_num (1-based, zero-padded
# into the socket filename).
#
# Returns the raw response as one string; empty string when the socket
# file for this process number does not exist.
def read_stats_socket(socket_num)
  socket_file = "/var/lib/haproxy/stats#{socket_num.to_s.rjust(2, '0')}.sock"
  return '' unless File.exist?(socket_file)

  lines = ''
  # Block form closes the socket even if reading raises (the original
  # leaked the descriptor on error).
  UNIXSocket.open(socket_file) do |socket|
    # Grab global process stats and frontend/backend stats in one round trip.
    socket.puts('show info; show stat -1 3 -1')
    while (line = socket.gets)
      lines << line
    end
  end
  lines
end
# Parse the combined "show info; show stat" response from one haproxy
# process into:
#   { 'pid'       => Integer,
#     'process'   => { info_key => Integer },
#     'frontends' => { proxy_name => { field => Integer } },
#     'backends'  => { proxy_name => { field => Integer } } }
# Only keys present in the *_METRICS tables are kept.
def process_raw_lines(lines)
  fields = []
  metrics = { 'process' => {}, 'frontends' => {}, 'backends' => {} }
  lines.each_line do |line|
    # "show info" output: colon-separated key/value pairs.
    line.match(/: /) do
      k, v = line.chomp.split(':')
      metrics['pid'] = v.to_i if k == 'Pid'
      metrics['process'][k] = v.to_i if PROCESS_METRICS.key?(k)
    end
    # The "show stat" CSV header names the fields for subsequent rows.
    line.match(/^# (.*)/) do |m|
      fields = m[1].split(',')
    end
    line.match(/^(.*),BACKEND/) do |m|
      metrics['backends'][m[1]] = extract_csv_metrics(line, fields, BACKEND_METRICS)
    end
    line.match(/^(.*),FRONTEND/) do |m|
      metrics['frontends'][m[1]] = extract_csv_metrics(line, fields, FRONTEND_METRICS)
    end
  end
  metrics
end

# Map one "show stat" CSV row onto { field_name => Integer } for the
# fields listed in the given metric table; other columns are dropped.
def extract_csv_metrics(line, fields, metric_table)
  values = line.split(',')
  row = {}
  fields.each_index do |i|
    row[fields[i]] = values[i].to_i if metric_table.key?(fields[i])
  end
  row
end
# Stats are summed across processes and counters converted to deltas
# since the last update. haproxy resets its counters on every reload
# (the process gets a new pid), so pids are tracked per socket index to
# detect resets; on a reset — or for a process/proxy with no previous
# sample — the raw value is used instead of a delta.
def aggregate_stats(stats)
  uptime = 0
  update = { 'process' => {}, 'frontends' => {}, 'backends' => {} }
  # Seed the process-level accumulators from any one sample's key set
  # (the original indexed stats[1], which crashed when socket 1 was absent).
  stats.each_value.first['process'].each_key do |k|
    update['process'][k] = 0
  end
  stats.each do |i, proc_stats|
    # Report the longest uptime seen across all processes.
    uptime = proc_stats['process']['Uptime_sec'] if proc_stats['process']['Uptime_sec'] > uptime
    pid = proc_stats['pid']
    prev = $last_update[i]
    # Deltas are only meaningful against a previous sample from the same pid.
    same_pid = !prev.nil? && prev['pid'] == pid
    proc_stats['process'].each do |k, v|
      value = v
      value -= prev['process'][k] if same_pid && PROCESS_METRICS[k][2] == 'counter'
      update['process'][k] += value
    end
    proc_stats['frontends'].each do |fname, fmetrics|
      update['frontends'][fname] ||= {}
      prev_frontend = same_pid ? prev['frontends'][fname] : nil
      fmetrics.each do |k, v|
        update['frontends'][fname][k] ||= 0
        value = v
        # Guarded lookups: a frontend (or field) new this cycle has no
        # previous value, so its raw count is used.
        if prev_frontend && prev_frontend[k] && FRONTEND_METRICS[k][2] == 'counter'
          value -= prev_frontend[k]
        end
        update['frontends'][fname][k] += value
      end
    end
    proc_stats['backends'].each do |bname, bmetrics|
      update['backends'][bname] ||= {}
      prev_backend = same_pid ? prev['backends'][bname] : nil
      bmetrics.each do |k, v|
        update['backends'][bname][k] ||= 0
        value = v
        if prev_backend && prev_backend[k] && BACKEND_METRICS[k][2] == 'counter'
          value -= prev_backend[k]
        end
        update['backends'][bname][k] += value
      end
    end
  end
  update['process']['Uptime_sec'] = uptime
  update
end
# Ship one cycle's aggregated metrics to ganglia by shelling out to the
# gmetric wrapper once per metric.
# NOTE(review): proxy/metric names are interpolated into a shell command
# unescaped — acceptable for trusted haproxy configs, but worth confirming.
def ganglia_send(data)
  gmetric = "/usr/local/bin/gmetric_client.sh --group=haproxy --type=uint32 --dmax 86400"
  data['process'].each do |key, value|
    meta = PROCESS_METRICS[key]
    system "#{gmetric} --name=haproxy_#{meta[0]} --units=#{meta[1]} --value=#{value}"
  end
  data['frontends'].each do |fname, fmetrics|
    fmetrics.each do |key, value|
      meta = FRONTEND_METRICS[key]
      system "#{gmetric} --name=haproxy_frontend_#{fname}_#{meta[0]} --units=#{meta[1]} --value=#{value}"
    end
  end
  data['backends'].each do |bname, bmetrics|
    bmetrics.each do |key, value|
      meta = BACKEND_METRICS[key]
      system "#{gmetric} --name=haproxy_backend_#{bname}_#{meta[0]} --units=#{meta[1]} --value=#{value}"
    end
  end
end
# Main poll loop: pre-1.8 multi-process haproxy has no master stats
# socket, so every per-process socket is polled each cycle.
loop do
  current = {}
  (1..num_procs).each do |i|
    raw = read_stats_socket(i)
    current[i] = process_raw_lines(raw) unless raw.empty?
  end
  unless current.empty?
    if $last_update.nil?
      # First pass: nothing to diff counters against yet, just remember
      # this sample.
      $last_update = current
    else
      cooked_stats = aggregate_stats(current)
      $last_update = current
      ganglia_send(cooked_stats)
    end
  end
  sleep 10
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment