Skip to content

@brunosoab /optimalperf.rb forked from vinibaggio/optimalperf.rb
Created

Embed URL

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
#--
# Copyright (C)2008 Ilya Grigorik
# You can redistribute this under the terms of the Ruby license
#
# Modifications by Vinicius B. Fuentes
#++
require 'rubygems'
require 'optparse'
require 'ruport'
# Drives httperf across a range of request rates against a target server
# and reports the highest rate the server sustained without saturating
# (no errors, reply time under threshold, achieved req/s close to the
# attempted rate).
class AutoPerf
  # Parses command-line options (-c CONFIG), loads the configuration
  # file and immediately starts the benchmark run.
  #
  # opts:: currently unused; kept for backward compatibility.
  def initialize(opts = {})
    @conf = {}
    # Use a distinct block variable name — the original shadowed the
    # `opts` method parameter here.
    OptionParser.new do |parser|
      parser.banner = "Usage: autoperf.rb [-c config]"
      parser.on("-c", "--config [string]", String, "configuration file") do |v|
        @conf = parse_config(v)
      end
    end.parse!
    run()
  end

  # Reads a "key = value" configuration file into a Hash.
  # Lines starting with '#' are skipped; values may optionally be
  # wrapped in single or double quotes (quotes are stripped).
  # If a 'wlog' glob pattern is present it is expanded to a sorted
  # list of matching file names.
  #
  # Raises Errno::EACCES when the file is not readable.
  def parse_config(config_file)
    raise Errno::EACCES, "#{config_file} is not readable" unless File.readable?(config_file)
    conf = {}
    # File.foreach closes the handle when done; the original leaked the
    # IO object returned by Kernel#open.
    File.foreach(config_file) do |line|
      next if /^\#/.match(line)          # skip comment lines
      next unless /\s*=\s*/.match(line)  # skip lines without an assignment
      param, value = line.split(/\s*=\s*/, 2)
      var_name = param.chomp.strip
      value = value.to_s.chomp.strip
      # Strip one pair of surrounding quotes, if present.
      conf[var_name] = (value =~ /^['"](.*)['"]$/ ? $1 : value)
    end
    conf['wlog'] = Dir[conf['wlog']].sort if conf['wlog']
    conf
  end

  # Runs a single httperf invocation built from `conf` and parses its
  # output into a Hash of metrics (req/s, reply time, errors, ...).
  # Config keys prefixed with 'httperf_' are passed straight through as
  # httperf command-line flags. Returns a Hash with a default of "" so
  # missing metrics read as empty strings.
  def benchmark(conf)
    httperf_opt = conf.keys.grep(/httperf/)
                      .collect { |k| "--#{k.gsub(/httperf_/, '')} #{conf[k]}" }
                      .join(" ")
    wlog_opt = ''
    if conf['wlog']
      # Rotate through the available wlog files so each run gets the
      # next file in the list.
      wlog = conf['wlog'].shift
      conf['wlog'].push wlog
      wlog_opt = "--wlog n,#{wlog}"
    end
    httperf_cmd = "httperf --hog --server #{conf['host']} --uri #{conf['uri']} --port #{conf['port']} #{httperf_opt} #{wlog_opt}"
    res = Hash.new("")
    # Merge stderr into stdout so error text is captured too.
    IO.popen("#{httperf_cmd} 2>&1") do |pipe|
      puts "\n#{httperf_cmd}"
      while ((line = pipe.gets))
        res['output'] += line
        case line
        when /^Total: .*replies (\d+)/ then res['replies'] = $1
        when /^Connection rate: (\d+\.\d)/ then res['conn/s'] = $1
        when /^Request rate: (\d+\.\d)/ then res['req/s'] = $1
        when /^Reply time .* response (\d+\.\d)/ then res['reply time'] = $1
        when /^Net I\/O: (\d+\.\d)/ then res['net io (KB/s)'] = $1
        when /^Errors: total (\d+)/ then res['errors'] = $1
        when /^Reply rate .*min (\d+\.\d) avg (\d+\.\d) max (\d+\.\d) stddev (\d+\.\d) \((\d+) samples\)/ then
          res['replies/s min'] = $1
          res['replies/s avg'] = $2
          res['replies/s max'] = $3
          res['replies/s stddev'] = $4
          res['samples'] = $5
        when /^Reply status: 1xx=\d+ 2xx=\d+ 3xx=\d+ 4xx=\d+ 5xx=(\d+)/ then res['5xx status'] = $1
        end
      end
    end
    res
  end

  # Sweeps the connection rate from low_rate to high_rate in rate_step
  # increments, benchmarking each rate, printing a running report and
  # tracking the best non-saturated rate.
  def run
    results = {}
    optimal_speed = 0.0
    error_lambda = @conf['error_lambda'].to_f
    response_threshold = @conf['response_threshold'].to_i
    samples = @conf['samples'] || '(Not specified)'
    report = Table(:column_names => ['rate', 'conn/s', 'req/s', 'replies/s avg', 'reply time',
                                     'errors', '5xx status', 'net io (KB/s)', 'samples'])
    (@conf['low_rate'].to_i..@conf['high_rate'].to_i).step(@conf['rate_step'].to_i) do |rate|
      # Optional shell hook before each run (e.g. cache warm-up / reset).
      if @conf['run_before']
        system(@conf['run_before'])
      end
      @conf.merge!({'httperf_rate' => rate})
      # Derive the number of connections; precedence is
      # time_of_test > samples > explicit httperf_num-conns.
      if @conf['time_of_test'] # Highest priority
        num_of_conns = @conf['time_of_test'].to_i * rate
      elsif @conf['samples']
        num_of_conns = @conf['samples'].to_i * rate * 5 + 1 # Conns. are sampled each 5s
      else
        num_of_conns = @conf['httperf_num-conns']
      end
      @conf.merge!('httperf_num-conns' => num_of_conns)
      # The original passed @conf.merge(@conf) — a no-op self-merge.
      results[rate] = benchmark(@conf)
      report << results[rate].merge({'rate' => rate, 'conns' => num_of_conns})
      puts report.to_s
      # Dump raw httperf output whenever something went wrong.
      puts results[rate]['output'] if results[rate]['errors'].to_i > 0 || results[rate]['5xx status'].to_i > 0
      reqs_per_sec = results[rate]['req/s'].to_f
      reply_time = results[rate]['reply time'].to_f
      errors = results[rate]['errors'].to_i
      current_rate = rate.to_f
      # A rate counts as "optimal" when throughput improved, the server
      # stayed under the latency threshold, produced no errors, and the
      # achieved req/s tracked the attempted rate within error_lambda.
      if reqs_per_sec > optimal_speed &&
         reply_time < response_threshold &&
         errors == 0 &&
         (current_rate - reqs_per_sec).abs < error_lambda
        optimal_speed = current_rate
      end
      sleep @conf['inbetween_time'].to_f
    end
    puts "Optimal speed: #{optimal_speed} reqs/s (error lambda is #{error_lambda})"
    puts "Samples: #{samples}"
  end
end
# Allow Ctrl-C to stop the sweep cleanly instead of dumping a backtrace.
Signal.trap("INT") do
  puts "Terminating tests."
  Process.exit
end

# Kick off the benchmark; configuration comes from ARGV (-c config).
AutoPerf.new()
# Autoperf Configuration File
# The host, URI (relative to the document root) and port to test.
host = 10.180.167.241
uri = /
port = 80
# The 'rate' is the number of connections to open per second.
# A series of tests will be conducted, starting at low rate,
# increasing by rate step, and finishing at high_rate.
low_rate = 10
high_rate = 200
rate_step = 10
# How much error is accepted, example:
# If the following happens:
# rate: 150
# reqs/s = 149.6
#
# The error is 0.4, thus with a lambda of 0.5, it is accepted
# as valid results (i.e. not saturated)
error_lambda = 0.5
# Time between tests in seconds
inbetween_time = 30
# Number of samples. If specified, httperf_num-conns will be ignored.
samples = 5
# Time of test in seconds. If specified, httperf_num-conns and samples will be ignored.
#time_of_test = 60
# httperf options
# wlog specifies a replay log file (null terminated requests paths)
# 'n' prefix tells httperf to stop after all requests in the file
# have been replayed
#httperf_wlog = y,urls-100
#httperf_period = e15
# Autoperf can generate different wlog instructions for every run if
# you set wlog (not httperf_wlog) to a glob pattern of the files you
# want to use.
# wlog = x?.nul
# num-conns is the total number of connections to make during a test
# num-calls is the number of requests per connection (if keep alive is supported)
# The product of num-calls and rate is the approximate number of
# requests per second that will be attempted.
httperf_num-conns = 20000
httperf_num-calls = 1
# timeout sets the maximum time (in seconds) that httperf will wait
# for replies from the web server. If the timeout is exceeded, the
# reply concerned is counted as an error.
httperf_timeout = 5
# httperf_debug = 10
# add-header adds an HTTP header
# If your test server is using HTTP basic auth, add a header like the following.
# To figure out what it should be use "curl -u user:password -v ..."
# httperf_add-header = '"Authorization: Basic AbC123xYz456==\n"'
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Something went wrong with that request. Please try again.