#!/bin/bash
# Network test script
# Install iproute2 if it is not already installed
pkg="iproute2"
if grep -qi alpine /etc/os-release; then
echo -e "OS release is Alpine\nInstalling $pkg with apk"
if apk info -e "$pkg" >/dev/null; then
echo "$pkg installed"
else
echo "$pkg NOT installed"
apk add "$pkg"
fi
else
echo -e "OS release is not Alpine\nInstalling $pkg with apt"
if dpkg-query -l "$pkg" >/dev/null 2>&1; then
echo "$pkg installed"
else
echo "$pkg NOT installed"
apt-get install -y "$pkg"
fi
fi
# Generic metrics used in our reports; every single (non-concurrent) test uses this list, and the report columns are shaped by it.
METRICS="RESULT_BRAND,THROUGHPUT,THROUGHPUT_UNITS,THROUGHPUT_CONFID,LOCAL_SEND_SIZE,PROTOCOL,LOCAL_SEND_THROUGHPUT,LOCAL_RECV_THROUGHPUT,REMOTE_SEND_THROUGHPUT,REMOTE_RECV_THROUGHPUT,LOCAL_VERSION,REMOTE_VERSION,LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,LOCAL_CPU_UTIL,LOCAL_CPU_PEAK_UTIL,LOCAL_CPU_PEAK_ID,LOCAL_CPU_COUNT,REMOTE_CPU_UTIL,REMOTE_CPU_PEAK_UTIL,REMOTE_CPU_PEAK_ID,REMOTE_CPU_COUNT,LOCAL_CPU_BIND,REMOTE_CPU_BIND,ELAPSED_TIME,DIRECTION"
# METRICS="RESULT_BRAND,THROUGHPUT,THROUGHPUT_UNITS,THROUGHPUT_CONFID,LOCAL_SEND_SIZE,LOCAL_RECV_SIZE,REMOTE_SEND_SIZE,REMOTE_RECV_SIZE,PROTOCOL,LOCAL_SEND_CALLS,LOCAL_BYTES_PER_SEND,LOCAL_RECV_CALLS,LOCAL_BYTES_PER_RECV,REMOTE_SEND_CALLS,REMOTE_BYTES_PER_SEND,REMOTE_RECV_CALLS,REMOTE_BYTES_PER_RECV,LOCAL_SEND_THROUGHPUT,LOCAL_RECV_THROUGHPUT,REMOTE_SEND_THROUGHPUT,REMOTE_RECV_THROUGHPUT,LOCAL_VERSION,REMOTE_VERSION,LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,LOCAL_CPU_UTIL,REMOTE_CPU_UTIL,REMOTE_INTERVAL_BURST,LOCAL_INTERVAL_BURST,LOCAL_BYTES_XFERD,SOURCE_ADDR,DEST_ADDR,LOCAL_CPU_BIND,REMOTE_CPU_BIND,ELAPSED_TIME,DIRECTION"
# Add COMMAND_LINE to the metric list above when debugging.
# Metrics used in request/response tests; all rr tests, including the concurrent variants, use these.
RR_METRICS="TRANSACTION_RATE,P50_LATENCY,P90_LATENCY,RT_LATENCY,MEAN_LATENCY,STDDEV_LATENCY,REQUEST_SIZE,RESPONSE_SIZE,BURST_SIZE"
# Request/response sizes (request_bytes,response_bytes pairs) for the rr and crr tests
RR_SIZES=${RR_SIZES:-"1,1 1,1024 1,4096 1,8000"}
UDP_RR_SIZES=${UDP_RR_SIZES:-"1,1 1,1024 1,4096 1,8000"}
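# Both can be overridden from the environment, e.g. (hypothetical script name and hosts):
#   RR_SIZES="64,64 1,16384" ./network-test.sh --tcp_rr 192.0.2.10 serverB clientA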
# Metrics used in the concurrent tests
CONMETRICS="THROUGHPUT,THROUGHPUT_CONFID,LOCAL_SEND_SIZE,LOCAL_SEND_THROUGHPUT,LOCAL_RECV_THROUGHPUT,REMOTE_SEND_THROUGHPUT,REMOTE_RECV_THROUGHPUT,LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,LOCAL_CPU_UTIL,LOCAL_CPU_PEAK_UTIL,LOCAL_CPU_COUNT,REMOTE_CPU_UTIL,REMOTE_CPU_PEAK_UTIL,REMOTE_CPU_COUNT,LOCAL_CPU_BIND,REMOTE_CPU_BIND"
# CSV data headers
metric_header="test_name,throughput,throughput_units,throughput_confidence_width,local_send_size,protocol,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_version,remote_version,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_peak_id,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_peak_id,remote_cpu_count,local_cpu_bind,remote_cpu_bind,elapsed_time,direction,local_netstat_retransmission,source_address,destination_address"
rr_metric_header="test_name,throughput,throughput_units,throughput_confidence_width,local_send_size,protocol,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_version,remote_version,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_peak_id,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_peak_id,remote_cpu_count,local_cpu_bind,remote_cpu_bind,elapsed_time_sec,direction,transaction_rate_tran_per_sec,50th_percentile_latency_microseconds,90th_percentile_latency_microseconds,round_trip_latency_usec_per_tran,mean_latency_microseconds,stddev_latency_microseconds,request_size_bytes,response_size_bytes,initial_burst_requests,local_netstat_retransmission,source_address,destination_address"
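# Note: these headers mirror the -o metric lists above (lowercased), plus three
# columns appended later by each test function: local_netstat_retransmission
# (via sed after every run) and the source/destination addresses (via awk).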
# Burst sizes passed to netperf -b (transactions initially in flight)
QD=${QD:-"0 1 16 64 128 256 512"}
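# With the default four RR_SIZES pairs and seven QD values, each rr/crr test
# performs 4*7 = 28 netperf runs. Note that -b only takes effect when netperf
# was built with --enable-burst.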
# message sizes for stream tests
Message_Size=${Message_Size:-"128 1K 8K"}
# The number of concurrent tests is determined by the number of CPUs
MAX_CPU_NUM=$(( $(grep -c ^processor /proc/cpuinfo) - 1 ))
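# Equivalent sketch, assuming GNU coreutils' nproc is available:
#   MAX_CPU_NUM=$(( $(nproc) - 1 ))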
LHOST=$(hostname -i | awk '{print $1}' )
set -e
# Runs any number of netperf instances concurrently. Currently unused; kept for future development.
function run_duper_netperf() {
loops=$1
shift
echo "DUPER_NETPERF: NSTREAMS=$loops ARGS=$@"
for ((i=0; i<loops; i++)); do
prefix="$(printf "%02d" $i) "
(netperf -s 2 "$@" | sed -e "s/^/$prefix/") &
done
wait
}
# Wrapper around run_duper_netperf that sums the per-stream THROUGHPUT values. Currently unused; kept for future development.
function duper_netperf() {
run_duper_netperf "$@" | perl -ln -e 'BEGIN { $sum = 0; $count = 0 } END { print "NSTREAMS=$count\nAGGREGATE_THROUGHPUT=$sum"; } if (/ THROUGHPUT=(\S+)$/) { $sum += $1; $count += 1 } print;'
}
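# Example (hypothetical; these helpers are not invoked anywhere in this script):
#   duper_netperf 4 -H "$rhost" -t omni -P 0 -- -d stream -k THROUGHPUT
# would launch 4 concurrent streams, prefix each output line with its stream
# index, and print NSTREAMS=4 plus the summed AGGREGATE_THROUGHPUT.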
# Runs request/response tests. The number of runs is RR_SIZES x QD (burst sizes); the -o option emits the results in CSV format.
function dorr() {
local proto=$1
echo $(date +%s)
rm -f /tmp/${proto}_rr_${lhostname}_${rhostname}.log
rm -f ${proto}_rr_${lhostname}_${rhostname}.log
touch /tmp/${proto}_rr_${lhostname}_${rhostname}.log
for rr_size in ${RR_SIZES}; do
for b in ${QD}; do
cmd="$CMD_PREFIX netperf \
-t omni \
-l 75 \
-H $rhost \
-s 2 \
-j \
-B ${proto}_rr \
-P 0 \
-T 0,0 \
-c \
-C \
-- \
-T ${proto} \
-d rr \
-o $METRICS,$RR_METRICS \
-r $rr_size \
-b $b"
echo $cmd
nstat -rn
$cmd | tee -a /tmp/${proto}_rr_${lhostname}_${rhostname}.log
# Append the netstat retransmission total (the local_netstat_retransmission column) to the last result row
sed -i -e '$ s/$/',"$( nstat | grep '^[^#]*Ret[^#]*$' | awk '{print $2}' |awk '{s+=$1} END {printf "%.0f\n", s}')"'/' /tmp/${proto}_rr_${lhostname}_${rhostname}.log
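# The sed/nstat pipeline above sums every non-comment counter whose name contains
# "Ret" (e.g. TcpRetransSegs). A standalone sketch of the same sum:
#   nstat | awk '/Ret/ {s += $2} END {printf "%.0f\n", s}'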
echo "END"
done
done
# Build the report CSV: append the source and destination addresses to every row.
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/${proto}_rr_${lhostname}_${rhostname}.log > ${proto}_rr_${lhostname}_${rhostname}.log
awk -v var="$rr_metric_header" -i inplace 'BEGINFILE{print var}{print}' ${proto}_rr_${lhostname}_${rhostname}.log
ts=$(date +%s)
grep -v error ${proto}_rr_${lhostname}_${rhostname}.log | uniq > ${proto}_rr_${lhostname}_${rhostname}_${ts}.csv
cat ${proto}_rr_${lhostname}_${rhostname}.log > ${proto}_rr_${lhostname}_${rhostname}_${ts}.log
rm ${proto}_rr_${lhostname}_${rhostname}.log
rm /tmp/${proto}_rr_${lhostname}_${rhostname}.log
}
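# Example standalone use (hypothetical values; rhost, lhostname and rhostname are
# normally set from the positional arguments at the bottom of this script):
#   rhost=192.0.2.10; lhostname=clientA; rhostname=serverB; dorr "tcp"
# writes tcp_rr_clientA_serverB_<epoch>.csv and a matching .log.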
# Runs concurrent request/response tests: one netperf instance pinned to each CPU, so the
# number of netperf invocations is (MAX_CPU_NUM+1) x RR_SIZES; -o emits the data in CSV format.
# Results are averaged over the instances (e.g. 4 CPUs -> 4 concurrent tests, each column divided by 4),
# and a total_throughput column is added as the sum across the instances.
function docpurr(){
local proto=$1
con_rr_header="test_name,avg_throughput_per_thread,throughput_confidence_width,local_send_size,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_count,local_cpu_bind,remote_cpu_bind,transaction_rate_tran_per_sec,50th_percentile_latency_microseconds,90th_percentile_latency_microseconds,round_trip_latency_usec_per_tran,mean_latency_microseconds,stddev_latency_microseconds,request_size_bytes,response_size_bytes,initial_burst_requests,total_throughput,total_local_transport_retransmissions,total_remote_transport_retransmissions,source_address,destination_address"
rm -f /tmp/test_concurrent_avg_${proto}_rr.log
# rm -f test_concurrent_avg_${proto}_rr.log
for rr_size in ${RR_SIZES}; do
for CPU in $(seq 0 $MAX_CPU_NUM); do
cmd="nice -20 $CMD_PREFIX netperf \
-T $CPU,$CPU \
-t omni \
-l 75 \
-H $rhost \
-s 2 \
-j \
-B concurrent_${proto}_rr \
-P 0 \
-c \
-C \
-- \
-T ${proto} \
-d rr \
-o $CONMETRICS,$RR_METRICS \
-r $rr_size"
echo $cmd
touch test_concurrent_avg_${proto}_rr_prep.csv
$cmd | tee -a test_concurrent_avg_${proto}_rr_prep.csv &
echo "END"
done
# Build a CSV row of per-column averages
wait
for i in $(seq 1 26) ; do
# Average each column across the concurrent runs: total/NR
avg=$(awk -v var="$i" -F ',' '{total += $var } END { print total/NR } ' test_concurrent_avg_${proto}_rr_prep.csv )
printf '%s,' "$avg" >> /tmp/test_concurrent_avg_${proto}_rr.log
done
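# e.g. on a 4-CPU host with per-thread throughputs 900, 950, 1000 and 1050 in
# column 1, the awk above prints (900+950+1000+1050)/4 = 975 for that column.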
rm test_concurrent_avg_${proto}_rr_prep.csv
wait
echo -e "\n" >> /tmp/test_concurrent_avg_${proto}_rr.log
cat /tmp/test_concurrent_avg_${proto}_rr.log | grep . | sed 's/,\+$//' > /tmp/test_concurrent_avg_${proto}_rr_prep.log
done
# Append total throughput (average x instance count) and total retransmissions; source and destination addresses are added next.
awk -v var="$MAX_CPU_NUM" 'BEGIN{FS=OFS=","} {print $0, $1*(var+1),$8*(var+1),$9*(var+1)}' /tmp/test_concurrent_avg_${proto}_rr_prep.log > /tmp/test_concurrent_avg_${proto}_rr_sum.log
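# Continuing the example above: avg throughput 975 x (MAX_CPU_NUM+1 = 4 instances)
# = 3900 total_throughput; columns 8 and 9 (local/remote transport retransmissions)
# are scaled back the same way to recover totals from the averages.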
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/test_concurrent_avg_${proto}_rr_sum.log > /tmp/test_concurrent_avg_${proto}_rr_total.log
awk -v var="concurrent_${proto}_rr" 'BEGIN{FS=OFS=","}{print var OFS $0}' /tmp/test_concurrent_avg_${proto}_rr_total.log > test_concurrent_avg_${proto}_rr.log
awk -v var="$con_rr_header" -i inplace 'BEGINFILE{print var}{print}' test_concurrent_avg_${proto}_rr.log
ts=$(date +%s)
grep -v error test_concurrent_avg_${proto}_rr.log | uniq > concurrent_${proto}_rr_${lhostname}_${rhostname}_${ts}.csv
cat test_concurrent_avg_${proto}_rr.log > concurrent_${proto}_rr_${lhostname}_${rhostname}_${ts}.log
rm test_concurrent_avg_${proto}_rr.log
rm -f /tmp/test_concurrent_avg_${proto}_rr*
}
# Runs UDP request/response tests. The number of runs is UDP_RR_SIZES x QD (burst sizes); the -o option emits the results in CSV format.
function doudprr() {
local proto=$1
echo $(date +%s)
rm -f /tmp/${proto}_rr_${lhostname}_${rhostname}.log
rm -f ${proto}_rr_${lhostname}_${rhostname}.log
touch /tmp/${proto}_rr_${lhostname}_${rhostname}.log
for udp_rr_size in ${UDP_RR_SIZES}; do
for b in ${QD}; do
cmd="$CMD_PREFIX netperf \
-t omni \
-l 75 \
-H $rhost \
-s 2 \
-j \
-B ${proto}_rr \
-P 0 \
-T 0,0 \
-c \
-C \
-- \
-T ${proto} \
-d rr \
-o $METRICS,$RR_METRICS \
-r $udp_rr_size \
-b $b"
echo $cmd
nstat -rn
$cmd | tee -a /tmp/${proto}_rr_${lhostname}_${rhostname}.log
sed -i -e '$ s/$/',"$( nstat | grep '^[^#]*Ret[^#]*$' | awk '{print $2}' |awk '{s+=$1} END {printf "%.0f\n", s}')"'/' /tmp/${proto}_rr_${lhostname}_${rhostname}.log
echo "END"
done
done
# Build the report CSV: append the source and destination addresses to every row.
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/${proto}_rr_${lhostname}_${rhostname}.log > ${proto}_rr_${lhostname}_${rhostname}.log
awk -v var="$rr_metric_header" -i inplace 'BEGINFILE{print var}{print}' ${proto}_rr_${lhostname}_${rhostname}.log
ts=$(date +%s)
grep -v error ${proto}_rr_${lhostname}_${rhostname}.log | uniq > ${proto}_rr_${lhostname}_${rhostname}_${ts}.csv
cat ${proto}_rr_${lhostname}_${rhostname}.log > ${proto}_rr_${lhostname}_${rhostname}_${ts}.log
rm ${proto}_rr_${lhostname}_${rhostname}.log
rm /tmp/${proto}_rr_${lhostname}_${rhostname}.log
}
# Runs concurrent UDP request/response tests: one netperf instance pinned to each CPU, so the
# number of netperf invocations is (MAX_CPU_NUM+1) x UDP_RR_SIZES; -o emits the data in CSV format.
# Results are averaged over the instances, and a total_throughput column is added as the sum across the instances.
function docpuudprr(){
local proto=$1
con_rr_header="test_name,avg_throughput_per_thread,throughput_confidence_width,local_send_size,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_count,local_cpu_bind,remote_cpu_bind,transaction_rate_tran_per_sec,50th_percentile_latency_microseconds,90th_percentile_latency_microseconds,round_trip_latency_usec_per_tran,mean_latency_microseconds,stddev_latency_microseconds,request_size_bytes,response_size_bytes,initial_burst_requests,total_throughput,total_local_transport_retransmissions,total_remote_transport_retransmissions,source_address,destination_address"
rm -f /tmp/test_concurrent_avg_${proto}_rr.log
# rm -f test_concurrent_avg_${proto}_rr.log
for udp_rr_size in ${UDP_RR_SIZES}; do
for CPU in $(seq 0 $MAX_CPU_NUM); do
cmd="nice -20 $CMD_PREFIX netperf \
-T $CPU,$CPU \
-t omni \
-l 75 \
-H $rhost \
-s 2 \
-j \
-B concurrent_${proto}_rr \
-P 0 \
-c \
-C \
-- \
-T ${proto} \
-d rr \
-o $CONMETRICS,$RR_METRICS \
-r $udp_rr_size"
echo $cmd
touch test_concurrent_avg_${proto}_rr_prep.csv
$cmd | tee -a test_concurrent_avg_${proto}_rr_prep.csv &
echo "END"
done
# Build a CSV row of per-column averages
wait
for i in $(seq 1 26) ; do
# Average each column across the concurrent runs: total/NR
avg=$(awk -v var="$i" -F ',' '{total += $var } END { print total/NR } ' test_concurrent_avg_${proto}_rr_prep.csv )
printf '%s,' "$avg" >> /tmp/test_concurrent_avg_${proto}_rr.log
done
rm test_concurrent_avg_${proto}_rr_prep.csv
wait
echo -e "\n" >> /tmp/test_concurrent_avg_${proto}_rr.log
cat /tmp/test_concurrent_avg_${proto}_rr.log | grep . | sed 's/,\+$//' > /tmp/test_concurrent_avg_${proto}_rr_prep.log
done
# Append total throughput (average x instance count) and total retransmissions; source and destination addresses are added next.
awk -v var="$MAX_CPU_NUM" 'BEGIN{FS=OFS=","} {print $0, $1*(var+1),$8*(var+1),$9*(var+1)}' /tmp/test_concurrent_avg_${proto}_rr_prep.log > /tmp/test_concurrent_avg_${proto}_rr_sum.log
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/test_concurrent_avg_${proto}_rr_sum.log > /tmp/test_concurrent_avg_${proto}_rr_total.log
awk -v var="concurrent_${proto}_rr" 'BEGIN{FS=OFS=","}{print var OFS $0}' /tmp/test_concurrent_avg_${proto}_rr_total.log > test_concurrent_avg_${proto}_rr.log
awk -v var="$con_rr_header" -i inplace 'BEGINFILE{print var}{print}' test_concurrent_avg_${proto}_rr.log
ts=$(date +%s)
grep -v error test_concurrent_avg_${proto}_rr.log | uniq > concurrent_${proto}_rr_${lhostname}_${rhostname}_${ts}.csv
cat test_concurrent_avg_${proto}_rr.log > concurrent_${proto}_rr_${lhostname}_${rhostname}_${ts}.log
rm test_concurrent_avg_${proto}_rr.log
rm -f /tmp/test_concurrent_avg_${proto}_rr*
}
# Runs connect/request/response (CRR) tests, which open a new connection for every transaction. The number of runs is RR_SIZES x QD (burst sizes); -o emits the data in CSV format.
function docrr(){
local proto=$1
echo $(date +%s)
rm -f ${proto}_crr_${lhostname}_${rhostname}.log
rm -f /tmp/${proto}_crr_${lhostname}_${rhostname}.log
touch /tmp/${proto}_crr_${lhostname}_${rhostname}.log
for rr_size in ${RR_SIZES}; do
for b in ${QD}; do
cmd="$CMD_PREFIX netperf \
-t omni \
-l 75 \
-H $rhost \
-B ${proto}_crr \
-c \
-C \
-s 2 \
-j \
-P 0 \
-T 0,0 \
-- \
-T ${proto} \
-d rr \
-c \
-o $METRICS,$RR_METRICS \
-r $rr_size \
-b $b"
echo $cmd
nstat -rn
$cmd | tee -a /tmp/${proto}_crr_${lhostname}_${rhostname}.log
# Append the netstat retransmission total (the local_netstat_retransmission column) to the last result row
sed -i -e '$ s/$/',"$( nstat | grep '^[^#]*Ret[^#]*$' | awk '{print $2}' |awk '{s+=$1} END {printf "%.0f\n", s}')"'/' /tmp/${proto}_crr_${lhostname}_${rhostname}.log
echo "END"
done
done
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/${proto}_crr_${lhostname}_${rhostname}.log > ${proto}_crr_${lhostname}_${rhostname}.log
awk -v var="$rr_metric_header" -i inplace 'BEGINFILE{print var}{print}' ${proto}_crr_${lhostname}_${rhostname}.log
ts=$(date +%s)
grep -v error ${proto}_crr_${lhostname}_${rhostname}.log | uniq > ${proto}_crr_${lhostname}_${rhostname}_${ts}.csv
cat ${proto}_crr_${lhostname}_${rhostname}.log > ${proto}_crr_${lhostname}_${rhostname}_${ts}.log
rm ${proto}_crr_${lhostname}_${rhostname}.log
rm /tmp/${proto}_crr_${lhostname}_${rhostname}.log
}
# Runs concurrent connect/request/response (CRR) tests: one netperf instance pinned to each CPU, so the
# number of netperf invocations is (MAX_CPU_NUM+1) x RR_SIZES; -o emits the data in CSV format.
# Results are averaged over the instances, and a total_throughput column is added as the sum across the instances.
function docpucrr(){
local proto=$1
con_rr_header="test_name,avg_throughput_per_thread,throughput_confidence_width,local_send_size,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_count,local_cpu_bind,remote_cpu_bind,transaction_rate_tran_per_sec,50th_percentile_latency_microseconds,90th_percentile_latency_microseconds,round_trip_latency_usec_per_tran,mean_latency_microseconds,stddev_latency_microseconds,request_size_bytes,response_size_bytes,initial_burst_requests,total_throughput,total_local_transport_retransmissions,total_remote_transport_retransmissions,source_address,destination_address"
rm -f /tmp/test_concurrent_avg_${proto}_crr.log
# rm test_concurrent_avg_${proto}_crr.log
for rr_size in ${RR_SIZES}; do
for CPU in $(seq 0 $MAX_CPU_NUM); do
cmd="nice -20 $CMD_PREFIX netperf \
-T $CPU,$CPU \
-t omni \
-l 75 \
-H $rhost \
-s 2 \
-j \
-B concurrent_${proto}_crr \
-P 0 \
-c \
-C \
-- \
-T ${proto} \
-d rr \
-c \
-o $CONMETRICS,$RR_METRICS \
-r $rr_size"
echo $cmd
touch test_concurrent_avg_${proto}_crr_prep.csv
$cmd | tee -a test_concurrent_avg_${proto}_crr_prep.csv &
echo "END"
done
wait
for i in $(seq 1 26) ; do
avg=$(awk -v var="$i" -F ',' '{total += $var } END { print total/NR } ' test_concurrent_avg_${proto}_crr_prep.csv )
printf '%s,' "$avg" >> /tmp/test_concurrent_avg_${proto}_crr.log
done
rm test_concurrent_avg_${proto}_crr_prep.csv
wait
echo -e "\n" >> /tmp/test_concurrent_avg_${proto}_crr.log
cat /tmp/test_concurrent_avg_${proto}_crr.log | grep . | sed 's/,\+$//' > /tmp/test_concurrent_avg_${proto}_crr_prep.log
done
awk -v var="$MAX_CPU_NUM" 'BEGIN{FS=OFS=","} {print $0, $1*(var+1),$8*(var+1),$9*(var+1)}' /tmp/test_concurrent_avg_${proto}_crr_prep.log > /tmp/test_concurrent_avg_${proto}_crr_sum.log
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/test_concurrent_avg_${proto}_crr_sum.log > /tmp/test_concurrent_avg_${proto}_crr_total.log
awk -v var="concurrent_${proto}_crr" 'BEGIN{FS=OFS=","}{print var OFS $0}' /tmp/test_concurrent_avg_${proto}_crr_total.log > test_concurrent_avg_${proto}_crr.log
awk -v var="$con_rr_header" -i inplace 'BEGINFILE{print var}{print}' test_concurrent_avg_${proto}_crr.log
ts=$(date +%s)
grep -v error test_concurrent_avg_${proto}_crr.log | uniq > concurrent_${proto}_crr_${lhostname}_${rhostname}_${ts}.csv
cat test_concurrent_avg_${proto}_crr.log > concurrent_${proto}_crr_${lhostname}_${rhostname}_${ts}.log
rm test_concurrent_avg_${proto}_crr.log
rm -f /tmp/test_concurrent_avg_${proto}_crr*
}
# Runs stream tests. There are two types: stream (upload: netperf -> netserver) and maerts
# ("stream" reversed; download: netserver -> netperf). The number of runs is determined by
# Message_Size; -o emits the data in CSV format.
function dostream() {
local ty=$1
echo $(date +%s)
rm -f ${ty}_stream_${lhostname}_${rhostname}.log
rm -f /tmp/${ty}_stream_${lhostname}_${rhostname}.log
touch /tmp/${ty}_stream_${lhostname}_${rhostname}.log
for m_size in ${Message_Size}; do
cmd="$CMD_PREFIX netperf \
-H $rhost \
-B ${ty}_stream \
-l 120 \
-t omni \
-s 2 \
-j \
-P 0 \
-T 0,0 \
-c \
-C \
-- \
-R 1 \
-T $ty \
-d stream \
-m $m_size \
-o $METRICS"
echo $cmd
nstat -rn
$cmd | tee -a /tmp/${ty}_stream_${lhostname}_${rhostname}.log
sed -i -e '$ s/$/',"$( nstat | grep '^[^#]*Ret[^#]*$' | awk '{print $2}' |awk '{s+=$1} END {printf "%.0f\n", s}')"'/' /tmp/${ty}_stream_${lhostname}_${rhostname}.log
echo "END"
done
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/${ty}_stream_${lhostname}_${rhostname}.log > ${ty}_stream_${lhostname}_${rhostname}.log
awk -v var="$metric_header" -i inplace 'BEGINFILE{print var}{print}' ${ty}_stream_${lhostname}_${rhostname}.log
ts=$(date +%s)
grep -v error ${ty}_stream_${lhostname}_${rhostname}.log | uniq > ${ty}_stream_${lhostname}_${rhostname}_${ts}.csv
cat ${ty}_stream_${lhostname}_${rhostname}.log > ${ty}_stream_${lhostname}_${rhostname}_${ts}.log
rm ${ty}_stream_${lhostname}_${rhostname}.log
rm /tmp/${ty}_stream_${lhostname}_${rhostname}.log
}
# Runs concurrent stream tests: one netperf instance pinned to each CPU, so the number of
# netperf invocations is (MAX_CPU_NUM+1) x Message_Size; -o emits the data in CSV format.
# Results are averaged over the instances, and a total_throughput column is added as the sum across the instances.
function docpustream() {
local ty=$1
con_stream_header="test_name,avg_throughput_per_thread,throughput_confidence_width,local_send_size,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_count,local_cpu_bind,remote_cpu_bind,total_throughput,total_local_transport_retransmissions,total_remote_transport_retransmissions,source_address,destination_address"
rm -f /tmp/test_concurrent_avg_${ty}_stream.log
# rm test_concurrent_avg_$ty.log
for m_size in ${Message_Size}; do
for CPU in $(seq 0 $MAX_CPU_NUM); do
cmd="nice -20 $CMD_PREFIX netperf \
-T $CPU,$CPU \
-t omni \
-l 120 \
-s 2 \
-H $rhost \
-j \
-B concurrent_${ty}_stream \
-P 0 \
-c \
-C \
-- \
-R 1 \
-T $ty \
-d stream \
-m $m_size \
-o $CONMETRICS"
echo $cmd
touch test_concurrent_avg_${ty}_stream-_prep.csv
$cmd | tee -a test_concurrent_avg_${ty}_stream-_prep.csv &
echo "END"
done
wait
for i in $(seq 1 17) ; do
avg=$(awk -v var="$i" -F ',' '{total += $var } END { print total/NR } ' test_concurrent_avg_${ty}_stream-_prep.csv )
printf '%s,' "$avg" >> /tmp/test_concurrent_avg_${ty}_stream.log
done
rm test_concurrent_avg_${ty}_stream-_prep.csv
wait
echo -e "\n" >> /tmp/test_concurrent_avg_${ty}_stream.log
cat /tmp/test_concurrent_avg_${ty}_stream.log | grep . | sed 's/,\+$//' > /tmp/test_concurrent_avg_${ty}_stream-_prep.log
done
awk -v var="$MAX_CPU_NUM" 'BEGIN{FS=OFS=","} {print $0, $1*(var+1),$8*(var+1),$9*(var+1)}' /tmp/test_concurrent_avg_${ty}_stream-_prep.log > /tmp/test_concurrent_avg_${ty}_stream-_sum.log
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/test_concurrent_avg_${ty}_stream-_sum.log > /tmp/test_concurrent_avg_${ty}_stream-_total.log
awk -v var="concurrent_${ty}_stream" 'BEGIN{FS=OFS=","}{print var OFS $0}' /tmp/test_concurrent_avg_${ty}_stream-_total.log > test_concurrent_avg_${ty}_stream.log
awk -v var="$con_stream_header" -i inplace 'BEGINFILE{print var}{print}' test_concurrent_avg_${ty}_stream.log
ts=$(date +%s)
grep -v error test_concurrent_avg_${ty}_stream.log | uniq > concurrent_${ty}_stream_${lhostname}_${rhostname}_${ts}.csv
cat test_concurrent_avg_${ty}_stream.log > concurrent_${ty}_stream_${lhostname}_${rhostname}_${ts}.log
rm test_concurrent_avg_${ty}_stream.log
rm /tmp/test_concurrent_avg_${ty}_stream*
}
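# Runs maerts (download: netserver -> netperf) stream tests; mirrors dostream above.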
function domaerts() {
local ty=$1
echo $(date +%s)
rm -f ${ty}_maerts_${lhostname}_${rhostname}.log
rm -f /tmp/${ty}_maerts_${lhostname}_${rhostname}.log
touch /tmp/${ty}_maerts_${lhostname}_${rhostname}.log
for m_size in ${Message_Size}; do
cmd="$CMD_PREFIX netperf \
-H $rhost \
-B ${ty}_maerts \
-l 120 \
-t omni \
-s 2 \
-j \
-P 0 \
-T 0,0 \
-c \
-C \
-- \
-R 1 \
-T $ty \
-d maerts \
-m $m_size \
-o $METRICS"
echo $cmd
nstat -rn
$cmd | tee -a /tmp/${ty}_maerts_${lhostname}_${rhostname}.log
sed -i -e '$ s/$/',"$( nstat | grep '^[^#]*Ret[^#]*$' | awk '{print $2}' |awk '{s+=$1} END {printf "%.0f\n", s}')"'/' /tmp/${ty}_maerts_${lhostname}_${rhostname}.log
echo "END"
done
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/${ty}_maerts_${lhostname}_${rhostname}.log > ${ty}_maerts_${lhostname}_${rhostname}.log
awk -v var="$metric_header" -i inplace 'BEGINFILE{print var}{print}' ${ty}_maerts_${lhostname}_${rhostname}.log
ts=$(date +%s)
grep -v error ${ty}_maerts_${lhostname}_${rhostname}.log | uniq > ${ty}_maerts_${lhostname}_${rhostname}_${ts}.csv
cat ${ty}_maerts_${lhostname}_${rhostname}.log > ${ty}_maerts_${lhostname}_${rhostname}_${ts}.log
rm ${ty}_maerts_${lhostname}_${rhostname}.log
rm /tmp/${ty}_maerts_${lhostname}_${rhostname}.log
}
# Runs concurrent maerts (download) stream tests: one netperf instance pinned to each CPU, so the
# number of netperf invocations is (MAX_CPU_NUM+1) x Message_Size; -o emits the data in CSV format.
# Results are averaged over the instances, and a total_throughput column is added as the sum across the instances.
function docpumaerts() {
local ty=$1
con_stream_header="test_name,avg_throughput_per_thread,throughput_confidence_width,local_send_size,local_send_throughput,local_recv_throughput,remote_send_throughput,remote_recv_throughput,local_transport_retransmissions,remote_transport_retransmissions,local_cpu_util,local_cpu_peak_util,local_cpu_count,remote_cpu_util,remote_cpu_peak_util,remote_cpu_count,local_cpu_bind,remote_cpu_bind,total_throughput,total_local_transport_retransmissions,total_remote_transport_retransmissions,source_address,destination_address"
rm -f /tmp/test_concurrent_avg_${ty}_maerts.log
# rm test_concurrent_avg_$ty.log
for m_size in ${Message_Size}; do
for CPU in $(seq 0 $MAX_CPU_NUM); do
cmd="nice -20 $CMD_PREFIX netperf \
-T $CPU,$CPU \
-t omni \
-l 120 \
-s 2 \
-H $rhost \
-j \
-B concurrent_${ty}_maerts \
-P 0 \
-c \
-C \
-- \
-R 1 \
-T $ty \
-d maerts \
-m $m_size \
-o $CONMETRICS"
echo $cmd
touch test_concurrent_avg_${ty}_maerts-_prep.csv
$cmd | tee -a test_concurrent_avg_${ty}_maerts-_prep.csv &
echo "END"
done
wait
for i in $(seq 1 17) ; do
avg=$(awk -v var="$i" -F ',' '{total += $var } END { print total/NR } ' test_concurrent_avg_${ty}_maerts-_prep.csv )
printf '%s,' "$avg" >> /tmp/test_concurrent_avg_${ty}_maerts.log
done
rm test_concurrent_avg_${ty}_maerts-_prep.csv
wait
echo -e "\n" >> /tmp/test_concurrent_avg_${ty}_maerts.log
cat /tmp/test_concurrent_avg_${ty}_maerts.log | grep . | sed 's/,\+$//' > /tmp/test_concurrent_avg_${ty}_maerts-_prep.log
done
awk -v var="$MAX_CPU_NUM" 'BEGIN{FS=OFS=","} {print $0, $1*(var+1),$8*(var+1),$9*(var+1)}' /tmp/test_concurrent_avg_${ty}_maerts-_prep.log > /tmp/test_concurrent_avg_${ty}_maerts-_sum.log
awk -v src="$LHOST" -v dst="$rhost" '{ printf("%s,%s,%s\n", $0, src, dst); }' /tmp/test_concurrent_avg_${ty}_maerts-_sum.log > /tmp/test_concurrent_avg_${ty}_maerts-_total.log
awk -v var="concurrent_${ty}_maerts" 'BEGIN{FS=OFS=","}{print var OFS $0}' /tmp/test_concurrent_avg_${ty}_maerts-_total.log > test_concurrent_avg_${ty}_maerts.log
awk -v var="$con_stream_header" -i inplace 'BEGINFILE{print var}{print}' test_concurrent_avg_${ty}_maerts.log
ts=$(date +%s)
grep -v error test_concurrent_avg_${ty}_maerts.log | uniq > concurrent_${ty}_maerts_${lhostname}_${rhostname}_${ts}.csv
cat test_concurrent_avg_${ty}_maerts.log > concurrent_${ty}_maerts_${lhostname}_${rhostname}_${ts}.log
rm test_concurrent_avg_${ty}_maerts.log
rm /tmp/test_concurrent_avg_${ty}_maerts*
}
# Number of loops: how many times ALL selected tests will be run. Changing this is not advised.
nloops=1
# Command-line option parsing; "conc" means concurrent tests.
while true; do
case $1 in
--tcp_stream)
run_tcp_stream=1
;;
--tcp_maerts)
run_tcp_maerts=1
;;
--tcp_rr)
run_tcp_rr=1
;;
--tcp_crr)
run_tcp_crr=1
;;
--udp_stream)
run_udp_stream=1
;;
--udp_rr)
run_udp_rr=1
;;
--conc_tcp_stream)
run_conc_tcp_stream=1
;;
--conc_tcp_maerts)
run_conc_tcp_maerts=1
;;
--conc_tcp_rr)
run_conc_tcp_rr=1
;;
--conc_tcp_crr)
run_conc_tcp_crr=1
;;
--conc_udp_stream)
run_conc_udp_stream=1
;;
--conc_udp_rr)
run_conc_udp_rr=1
;;
--all)
run_tcp_stream=1
run_udp_stream=1
run_tcp_rr=1
run_udp_rr=1
run_tcp_crr=1
run_conc_tcp_stream=1
run_conc_udp_stream=1
run_conc_tcp_rr=1
run_conc_udp_rr=1
run_conc_tcp_crr=1
;;
--concall)
run_conc_tcp_stream=1
run_conc_udp_stream=1
run_conc_tcp_rr=1
run_conc_udp_rr=1
run_conc_tcp_crr=1
;;
--dry-run)
CMD_PREFIX="echo"
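# With CMD_PREFIX="echo", each "$cmd" expansion below prints the netperf
# command line instead of executing it.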
;;
--nloops)
if [ "$2" ]; then
nloops=$2
shift
else
echo "--nloops requires an argument" >&2
exit 1
fi
;;
-?*)
printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
;;
--)
shift
break
;;
*)
break
esac
shift
done
if [ $# -lt 3 ]; then
echo "Usage: $0 [--{tcp,udp}_stream] [--tcp_maerts] [--{tcp,udp}_rr] [--tcp_crr] [--conc_{tcp,udp}_stream] [--conc_tcp_maerts] [--conc_{tcp,udp}_rr] [--conc_tcp_crr] [--all] [--concall] [--dry-run] [--nloops n] <remote_host_IP> <remote_hostname> <local_hostname>  # conc means concurrent"
exit 1
fi
rhost=$1
rhostname=$2
lhostname=$3
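# Example invocations (hypothetical script name and host values):
#   ./network-test.sh --tcp_stream --tcp_rr 192.0.2.10 serverB clientA
#   ./network-test.sh --all --nloops 2 192.0.2.10 serverB clientA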
for _ in $(seq $nloops) ; do
if [ "$run_tcp_stream" == "1" ]; then
dostream "tcp"
fi
if [ "$run_tcp_maerts" == "1" ]; then
domaerts "tcp"
fi
if [ "$run_udp_stream" == "1" ]; then
dostream "udp"
fi
# RR
if [ "$run_tcp_rr" == "1" ]; then
dorr "tcp"
fi
if [ "$run_tcp_crr" == "1" ]; then
docrr "tcp"
fi
if [ "$run_udp_rr" == "1" ]; then
doudprr "udp"
fi
# Concurrent tests
if [ "$run_conc_tcp_stream" == "1" ]; then
docpustream "tcp"
fi
if [ "$run_conc_tcp_maerts" == "1" ]; then
docpumaerts "tcp"
fi
if [ "$run_conc_udp_stream" == "1" ]; then
docpustream "udp"
fi
# RR
if [ "$run_conc_tcp_rr" == "1" ]; then
docpurr "tcp"
fi
if [ "$run_conc_tcp_crr" == "1" ]; then
docpucrr "tcp"
fi
if [ "$run_conc_udp_rr" == "1" ]; then
docpuudprr "udp"
fi
done