#!/bin/bash
#
# constants
IFACE=eth0
TX_QUEUE_LEN=5000
RPS_FLOW_CNT=32768
QUEUES=$(ls -d /sys/class/net/${IFACE}/queues/rx-* | wc -l)
RPS_FLOW_QUEUE=$((RPS_FLOW_CNT / QUEUES))
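# Each RX queue gets an equal share of the global flow table;
# e.g. with RPS_FLOW_CNT=32768 and 8 RX queues, each queue gets 4096 flows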
# set global flow table size (rps_sock_flow_entries)
echo "Global rps_flow_cnt: ${RPS_FLOW_CNT}"
echo ${RPS_FLOW_CNT} > /proc/sys/net/core/rps_sock_flow_entries
echo "Recieve queues: ${QUEUES}"
# set per-queue flow count
for q in $(seq 0 $((QUEUES - 1))); do
    echo "/sys/class/net/${IFACE}/queues/rx-${q}/rps_flow_cnt=${RPS_FLOW_QUEUE}"
    echo ${RPS_FLOW_QUEUE} > /sys/class/net/${IFACE}/queues/rx-${q}/rps_flow_cnt
done
# set transmit queue length
echo "TxQueueLength: ${TX_QUEUE_LEN}"
ifconfig ${IFACE} txqueuelen ${TX_QUEUE_LEN}
# balance irqs for IFACE
/usr/local/bin/set_irq_affinity $IFACE
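
# Example invocation -- run once at boot, as root (filename illustrative):
#   sudo /usr/local/bin/tune_network.sh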

# --- /usr/local/bin/set_irq_affinity: the IRQ/RPS/XPS helper invoked above ---
#!/bin/bash
set -e
#
# Prints IRQ numbers for the given physical interface
#
get_irqs()
{
    local iface=$1
    grep "${iface}.*TxRx" /proc/interrupts | grep -v fdir | cut -f 1 -d :
}
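
# Illustrative /proc/interrupts line matched above (format varies by driver):
#   42:   1234   5678   IR-PCI-MSI-edge   eth0-TxRx-0
# -> yields IRQ number "42"; "grep -v fdir" skips ixgbe Flow Director vectors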
#
# Bind Transmit Packet Steering avoiding IRQ affinity cores
#
setup_xps()
{
    local iface=$1
    local cpuset=$2
    local mask
    local i=0
    local xps_queues_count=$(ls -1 /sys/class/net/$iface/queues/*/xps_cpus | wc -l)
    for mask in $(hwloc-distrib --to core --restrict $cpuset $xps_queues_count)
    do
        set_one_mask "/sys/class/net/$iface/queues/tx-$i/xps_cpus" $mask
        i=$(( i + 1 ))
    done
}
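
# Illustrative hwloc-distrib output consumed by the loop above -- one hex
# cpuset mask per line (actual values depend on the machine's topology):
#   0x00000002
#   0x00000008
#   0x00000020
# The masks land in tx-0, tx-1, tx-2, ... xps_cpus in order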
#
# Bind Receive Packet Steering avoiding IRQ affinity cores
#
setup_rps()
{
    local iface=$1
    local cpuset=$2
    local mask
    local i=0
    local rps_queues_count=$(ls -1 /sys/class/net/$iface/queues/*/rps_cpus | wc -l)
    for mask in $(hwloc-distrib --to core --restrict $cpuset $rps_queues_count)
    do
        set_one_mask "/sys/class/net/$iface/queues/rx-$i/rps_cpus" $mask
        i=$(( i + 1 ))
    done
}
#
# Bind IRQs for TxRx queues according to the NUMA distribution
#
distribute_irqs()
{
    local iface=$1
    local cpu_mask=$2
    local irqs=( $(get_irqs $iface) )
    local mask
    local i=0
    for mask in $cpu_mask
    do
        set_one_mask "/proc/irq/${irqs[$i]}/smp_affinity" $mask
        i=$(( i + 1 ))
    done
}
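
# e.g. a mask of 2 written to /proc/irq/42/smp_affinity pins IRQ 42 to
# CPU 1 (bit 1 set); set_one_mask strips any "0x" prefix before writing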
#
# Bind XPS / RPS / IRQ Affinity to CPUs other than CPU0 and its hyper-threading siblings
#
# Use hwloc-distrib for generating the appropriate CPU masks for NUMA awareness.
#
setup()
{
    local iface=$1
    local queues_count cpu_mask filterset mask_filter
    # In a single-core environment there is no point in configuring RPS
    [[ $(hwloc-calc core:0.pu:all) -eq $(hwloc-calc all) ]] && return
    queues_count=$(ls -1 /sys/class/net/$iface/queues/*/rps_cpus | wc -l)
    # Calculate one single-core mask per NUMA domain, based on the number of TxRx queues
    cpu_mask=$(hwloc-distrib --to core --single --restrict $(hwloc-calc all ~core:0) $queues_count)
    distribute_irqs $iface "$cpu_mask"
    # Convert the bound masks into an exclude set ("~"-prefixed) for the final filter;
    # word splitting has already flattened hwloc-distrib's newlines into spaces
    filterset=$(echo $cpu_mask | sed -e 's/ / ~/g')
    # Calculate the remaining CPU range, excluding what is already bound and physical CPU 0
    mask_filter=$(hwloc-calc all ~core:0 ~${filterset})
    # Now set up Receive Packet Steering per queue
    setup_rps $iface $mask_filter
    # Now set up Transmit Packet Steering per queue
    setup_xps $iface $mask_filter
}
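
# Worked example (hypothetical 2-socket box, one TxRx queue per NUMA node):
#   cpu_mask="0x00000002 0x00000200"     # one core per NUMA domain, for IRQs
#   filterset="0x00000002 ~0x00000200"   # each space becomes a " ~" separator
#   hwloc-calc all ~core:0 ~0x00000002 ~0x00000200
#   -> the remaining CPUs, used for the RPS/XPS masks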
#
# set_one_mask <sysfs/procfs file> <CPU mask>
#
set_one_mask()
{
    local cpuset_conf_file=$1
    local mask=$(echo $2 | sed -e 's/0x//g')
    echo -n "Setting mask $mask in $cpuset_conf_file"
    # Temporarily apply the mask to this script's own affinity so taskset
    # can render it as a human-readable CPU list for the log line below
    taskset -p $mask $$ &> /dev/null
    echo " - [cpu list]: $(taskset -c -p $$ | awk '{print $NF}')"
    echo $mask > $cpuset_conf_file
}
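
# e.g. set_one_mask "/proc/irq/42/smp_affinity" 0x2
# prints: Setting mask 2 in /proc/irq/42/smp_affinity - [cpu list]: 1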
## MAIN ##
echo "Network Affinity Script"
echo "======================="
lscpu | grep -i numa
echo "======================="
setup $1
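
# Example run (illustrative output; actual values depend on topology):
#   $ sudo /usr/local/bin/set_irq_affinity eth0
#   Network Affinity Script
#   =======================
#   NUMA node(s):          2
#   NUMA node0 CPU(s):     0-7
#   NUMA node1 CPU(s):     8-15
#   =======================
#   Setting mask 2 in /proc/irq/42/smp_affinity - [cpu list]: 1
#   ...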