ChromiumOS Development Scripts
#!/bin/bash
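# Builds coreboot for one or more board configs, stitches in the
# depthcharge payload (and TianoCore as altfw when present), signs the
# RW regions with the dev keys, and optionally flashes the result
# (--flash).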
set -eu
: "${CACHE_DIR:=/tmp/coreboot}"
: "${VBOOT_SOURCE:=/mnt/host/source//src/platform/vboot_reference}"
: "${HOSTCC:=x86_64-pc-linux-gnu-clang}"
: "${HOSTPKGCONFIG:=x86_64-pc-linux-gnu-pkg-config}"
: "${XGCCPATH:=/opt/coreboot-sdk/bin/}"
: "${BOARD:=zork}"
export XGCCPATH
if [[ "$#" -eq 0 ]]; then
echo "Usage: $0 <board|config file>..."
echo "e.g., $0 trembyle mandolin-hardcore"
exit 1
fi
if [[ -z "$BOARD" ]]; then
echo "BOARD is not set" >&2
exit 1
fi
declare FIRMWARE_ROOT=/build/$BOARD/firmware
declare KEYDIR=/usr/share/vboot/devkeys
declare -a CONFIGS
declare -a CONFIG_PATHS=(
# "$HOME/trunk/src/overlays/overlay-$BOARD/sys-boot/coreboot-$BOARD/files/configs"
"$HOME/trunk/src/third_party/chromiumos-overlay/sys-boot/coreboot/files/configs/"
"./configs"
)
em100=
psp_verstage=
psp_bootloader=
menuconfig=
clean=
no_serial=
gbb_flags=
signed_verstage=1
depthcharge_payload="dev.elf"
do_flash=0
while [[ "$#" -gt 0 ]]; do
arg="$1"
shift
if [[ "$arg" = "--" ]]; then
break;
fi
if [[ "$arg" = "-e" || "$arg" == "--em100" ]]; then
em100=y
echo "Using em100 mode"
continue
fi
if [[ "$arg" = "--psp-verstage" ]]; then
psp_verstage=y
psp_bootloader="${HOME}/trunk/src/private-overlays/chipset-picasso-private/sys-boot/coreboot-private-files-chipset-picasso/files/debug_psp/PspBootLoader_test_RV_dbg.sbin"
echo "Building PSP verstage"
continue
fi
if [[ "$arg" = "--psp-bootloader" ]]; then
psp_bootloader="$1"
echo "Using custom psp bootloader"
shift
continue
fi
if [[ "$arg" = "--gbb" ]]; then
gbb_flags="$1"
shift
continue
fi
if [[ "$arg" = "--clean" ]]; then
clean=1
continue
fi
if [[ "$arg" = "--menuconfig" ]]; then
menuconfig=1
continue
fi
if [[ "$arg" = "--netboot" ]]; then
depthcharge_payload="netboot.elf"
continue
fi
if [[ "$arg" = "--no-serial" ]]; then
no_serial=1
continue
fi
if [[ "$arg" = "--serial" ]]; then
no_serial=
continue
fi
if [[ "$arg" = "--signed-verstage" ]]; then
signed_verstage=1
continue
fi
if [[ "$arg" = "--no-signed-verstage" ]]; then
signed_verstage=0
continue
fi
if [[ "$arg" = "--flash" ]]; then
do_flash=1
continue
fi
if [[ -e "$arg" ]]; then
CONFIGS+=("$arg")
continue
fi
found=0
for config_path in "${CONFIG_PATHS[@]}"; do
if [[ -e "$config_path/config.$arg" ]]; then
CONFIGS+=("$config_path/config.$arg")
found=1
break
fi
done
if [[ "$found" -eq 0 ]]; then
echo "Failed to find config for '$arg'"
exit 1
fi
done
declare -a MAKE_OPTS=("$@")
declare -a CLEANUP
declare UNMOUNT=""
function finish {
cd /
if [[ -n "$UNMOUNT" ]]; then
sudo umount "$UNMOUNT"
fi
local dir
for dir in "${CLEANUP[@]+"${CLEANUP[@]}"}"; do
if [[ -e "$dir" ]]; then
rm -r "$dir"
fi
done
}
trap finish EXIT
# $1: cache_dir
# $2: work_dir
# $3: config_path
function build-coreboot-rom() {
local cache_dir="$1" work_dir="$2" config_path="$3"
cp "$config_path" "$work_dir/.config"
if [[ "$signed_verstage" -eq 0 ]]; then
sed -i -e '/CONFIG_PSP_VERSTAGE_FILE/d' -e '/PSP_VERSTAGE_SIGNING_TOKEN/d' "$work_dir/.config"
fi
{
declare serial_config="$HOME/trunk/src/third_party/chromiumos-overlay/sys-boot/coreboot/files/configs/fwserial.$BOARD"
if [[ -z "$no_serial" ]]; then
cat "${serial_config}"
# Sometimes the fwserial config is missing a newline
echo
fi
if [[ -n "${em100}" ]]; then
echo CONFIG_EM100=y
fi
if [[ -n "${psp_verstage}" ]]; then
# TODO: Enable once this exists
: echo CONFIG_VBOOT_STARTS_BEFORE_BOOTBLOCK=y
fi
if [[ -n "${psp_bootloader}" ]]; then
if [[ ${psp_bootloader} =~ " " ]]; then
echo "PSP Bootloader path contains a space!" >&2
echo "Copying to workdir, this will result in a cache bust" >&2
cp "$psp_bootloader" "$work_dir/"
psp_bootloader="${work_dir}/$(basename "$psp_bootloader")"
fi
echo "CONFIG_PSP_BOOTLOADER_FILE=\"${psp_bootloader}\""
fi
} >> "$work_dir/.config"
cat "$work_dir/.config"
local make_cmd="olddefconfig"
if [[ "$menuconfig" -eq 1 ]]; then
make_cmd="menuconfig"
fi
if ! make \
obj="$work_dir" \
DOTCONFIG="$work_dir/.config" \
HOSTCC="$HOSTCC" \
HOSTPKGCONFIG="$HOSTPKGCONFIG" \
VBOOT_SOURCE="$VBOOT_SOURCE" \
UPDATED_SUBMODULES=1 \
"$make_cmd"; then
cp "$work_dir/.config" /tmp/bad.config
echo "Failed to make $make_cmd" >&2;
echo "See /tmp/bad.config" >&2;
exit 1
fi
if [[ ! -e "$cache_dir/.config" ]] || ! cmp "$work_dir/.config" "$cache_dir/.config"; then
echo "Cache busted: $cache_dir"
# Bust the cache anytime the .config is updated
rm -rf "$cache_dir"
mkdir -p "$cache_dir"
cp "$work_dir/.config" "$cache_dir/.config"
else
echo "Reusing cache at $cache_dir"
fi
# Store git metadata
# git log --oneline '@{u}~..' > "$cache_dir/coreboot.patch" || true
# git diff --cached >> "$cache_dir/coreboot.patch"
# git diff >> "$cache_dir/coreboot.patch"
pwd
make -j \
obj="$cache_dir" \
DOTCONFIG="$cache_dir/.config" \
HOSTCC="$HOSTCC" \
HOSTPKGCONFIG="$HOSTPKGCONFIG" \
VBOOT_SOURCE="$VBOOT_SOURCE" \
UPDATED_SUBMODULES=1 \
"${MAKE_OPTS[@]+${MAKE_OPTS[@]}}"
# Kconfig generates a dummy file that doesn't get removed.
rm -f ..config.tmp.*
echo "Successfully built $cache_dir"
}
# Use make to compress assets in parallel
# $1: src
# $2: dest
function compress-assets() {
local src="$1" dest="$2"
echo src="$src" dest="$dest"
make -j -f - src="$src" dest="$dest" << 'EOF'
.DELETE_ON_ERROR:
inputs:=$(wildcard $(src)/*)
outputs:=$(inputs:$(src)/%=$(dest)/%)
$(info Inputs: ${inputs})
$(info Outputs: ${outputs})
.PHONY: all
all: $(outputs)
$(dest):
mkdir -p "$@"
$(dest)/%: $(src)/% | $(dest)
cbfs-compression-tool compress "$<" "$@" LZMA
EOF
}
# $1: work_dir
# $2: image.bin
# $3: slot
function sign-region() {
local work_dir="$1" image="$2" slot="$3"
local main="FW_MAIN_${slot}" vblock="VBLOCK_${slot}"
local -i size
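# cbfstool truncate drops the unused tail of the region and reports the
# new size; read + truncate -s then pads the extracted region back out
# to that size before it is written back and signed with the dev keys.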
size="$(cbfstool "${image}" truncate -r "$main")"
cbfstool "${image}" read -r "$main" -f "$work_dir/$main"
truncate -s "$size" "$work_dir/$main"
cbfstool "${image}" write --force -u -i 255 \
-r "$main" -f "$work_dir/$main"
futility vbutil_firmware \
--vblock "$work_dir/$vblock" \
--keyblock "${KEYDIR}/firmware.keyblock" \
--signprivate "${KEYDIR}/firmware_data_key.vbprivk" \
--version 1 \
--fv "$work_dir/$main" \
--kernelkey "${KEYDIR}/kernel_subkey.vbpubk" \
--flags 0
cbfstool "${image}" write -u -i 255 -r "${vblock}" \
-f "$work_dir/$vblock"
}
# Hash the payload of an altfw alternative bootloader
# Loads the payload from $rom on RW_LEGACY under:
# altfw/<name>
# Stores the hash into $rom on RW-A and RW-B as:
# altfw/<name>.sha256
# Args:
# $1: work_dir
# $2: rom file where the payload can be found
# $3: name of the alternative bootloader
function hash-altfw-payload() {
local work_dir="$1"
local rom="$2"
local name="$3"
local payload_file="altfw/${name}"
local hash_file="${payload_file}.sha256"
local tmpfile="${work_dir}/file"
local tmphash="${work_dir}/hash"
# Grab the raw uncompressed payload (-U) and hash it into $tmphash.
cbfstool "${rom}" extract -r RW_LEGACY -n "${payload_file}" \
-f "${tmpfile}" -U >/dev/null
openssl dgst -sha256 -binary "${tmpfile}" > "${tmphash}"
# Copy $tmphash into RW-A and RW-B.
cbfstool "${rom}" add -r FW_MAIN_A,FW_MAIN_B \
-f "${tmphash}" -n "${hash_file}" -t raw
}
# Hash the EC firmware
#
# Args:
# $1: work_dir
# $2: AP bin
# $3: EC bin
function add-ec() {
local work_dir="$1"
local rom="$2"
local ec="$3"
local hash="${work_dir}/ec.hash"
local comp_type=lzma
openssl dgst -sha256 -binary "${ec}" > "${hash}"
cbfstool "${rom}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c "${comp_type}" \
-f "${ec}" -n "ecrw" # -p "${pad}"
cbfstool "${rom}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c none \
-f "${hash}" -n "ecrw.hash"
}
# $1: cache_dir
# $2: work_dir
# $3: image.bin
function add-cbfs-master-header() {
local cache_dir="$1" work_dir="$2" image="$3"
# Add master header to the RW_LEGACY section
printf "ptr_" > "${work_dir}/ptr"
cbfstool "${image}" add -r RW_LEGACY -f "${work_dir}/ptr" -n "header pointer" \
-t "cbfs header" -b -4
cbfstool "${image}" add-master-header -r RW_LEGACY
}
# $1: cache_dir
# $2: work_dir
# $3: image.bin
function add-tianocore() {
local cache_dir="$1" work_dir="$2" image="$3"
local payload="${FIRMWARE_ROOT}/tianocore/UEFIPAYLOAD.fd"
local bl_list="${work_dir}/altfw"
if [ ! -e "$payload" ]; then
return
fi
set -x
cbfstool "${image}" add-payload -r RW_LEGACY \
-n altfw/tianocore -c lzma -f \
"${FIRMWARE_ROOT}/tianocore/UEFIPAYLOAD.fd"
hash-altfw-payload "${work_dir}" "${image}" tianocore
touch "${bl_list}"
# For now, use TianoCore as the default.
echo "1;altfw/tianocore;TianoCore;TianoCore bootloader" \
>> "${bl_list}"
cbfstool "${image}" add -r RW_LEGACY -n altfw/list -t raw -f "${bl_list}"
}
# $1: cache_dir
# $2: work_dir
# $3: image.bin
# $4: build_name
function add-depthcharge() {
local cache_dir="$1" work_dir="$2" image="$3" build_name="$4"
local payload="$FIRMWARE_ROOT/${build_name}/depthcharge/${depthcharge_payload}"
if [[ ! -f "$payload" ]]; then
payload="$FIRMWARE_ROOT/$BOARD/depthcharge/${depthcharge_payload}"
fi
# local payload="/build/$BOARD/var/cache/portage/sys-boot/depthcharge/$BOARD/${depthcharge_payload}"
local ecroot="$FIRMWARE_ROOT/${build_name}"
if [[ ! -e "$payload" ]]; then
payload="$FIRMWARE_ROOT/depthcharge/${depthcharge_payload}"
fi
if [[ ! -e "$payload" ]]; then
echo "Could not find depthcharge.elf" >&2
exit 1
fi
echo "Depthcharge: ${payload}"
compress-assets "$FIRMWARE_ROOT/cbfs-ro-compress/${build_name}" \
"$cache_dir/compressed-assets-ro"
local file
for file in "$cache_dir/compressed-assets-ro"/*; do
cbfstool "${image}" add -r COREBOOT -f "${file}" \
-n "$(basename "${file}")" -t raw -c precompression
done
cbfstool "${image}" expand -r FW_MAIN_A,FW_MAIN_B
set -x
# "${cache_dir}"/util/cbfstool/
cbfstool "${image}" add-payload -r COREBOOT,FW_MAIN_A,FW_MAIN_B \
-f "${payload}" -n fallback/payload -c lzma -a 64 # -vvv
set +x
if [[ -e "${ecroot}/ec.RW.bin" ]]; then
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c lzma \
-f "${ecroot}/ec.RW.bin" -n "ecrw"
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c none \
-f "${ecroot}/ec.RW.hash" -n "ecrw.hash"
fi
if [[ -e "${ecroot}/ec.RO.bin" ]]; then
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c lzma \
-f "${ecroot}/ec.RO.bin" -n "ecro"
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c none \
-f "${ecroot}/ec.RO.hash" -n "ecro.hash"
fi
if [[ -e "${ecroot}/zephyr.bin" ]]; then
pushd "${work_dir}"
dump_fmap -x "${ecroot}/zephyr.bin" RW_FW
openssl dgst -sha256 -binary "RW_FW" > "ecrw.hash"
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c lzma \
-f "RW_FW" -n "ecrw"
cbfstool "${image}" add -r FW_MAIN_A,FW_MAIN_B -t raw -c none \
-f "ecrw.hash" -n "ecrw.hash"
popd
fi
}
# $1: config_path
function build-boot-image() {
local config_path="$1"
local work_dir build_name cache_dir image_name
work_dir="$(mktemp -d)" && CLEANUP+=("$work_dir")
build_name="${config_path##*.}"
cache_dir="$CACHE_DIR/$build_name"
image_name="image-$build_name.serial.bin"
echo "Building $config_path"
if [[ "$clean" -eq 1 ]]; then
rm -rf "$cache_dir"
fi
mount-overlay "$build_name"
build-coreboot-rom "$cache_dir" "$work_dir" "$config_path"
# cp ~/workspace/tmp/coreboot.rom "$cache_dir/coreboot.rom"
if grep -q CONFIG_VBOOT=y "$cache_dir/.config"; then
cp "$cache_dir/coreboot.rom" "$work_dir/$image_name"
add-depthcharge "$cache_dir" "$work_dir" "$work_dir/$image_name" "$build_name"
add-cbfs-master-header "$cache_dir" "$work_dir" "$work_dir/$image_name"
add-tianocore "$cache_dir" "$work_dir" "$work_dir/$image_name"
# add-ec "$work_dir" "$work_dir/$image_name" "${FIRMWARE_ROOT}/${build_name}/ec.RW.bin"
sign-region "$work_dir" "$work_dir/$image_name" "A"
sign-region "$work_dir" "$work_dir/$image_name" "B"
cp "$work_dir/$image_name" "$cache_dir/$image_name"
else
cp "$cache_dir/coreboot.rom" "$cache_dir/$image_name"
fi
if [[ -n "$gbb_flags" ]]; then
/usr/share/vboot/bin/set_gbb_flags.sh --file "$cache_dir/$image_name" "$gbb_flags"
fi
if [[ "$depthcharge_payload" == "netboot.elf" || "$depthcharge_payload" == "dev.elf" ]]; then
"/mnt/host/source/src/third_party/chromiumos-overlay/sys-boot/chromeos-bootimage/files/netboot_firmware_settings.py" \
-i "$cache_dir/$image_name" \
--bootfile="vmlinuz" --argsfile="config" --tftpserverip="10.0.0.144"
fi
popd
FINAL_IMAGES+=("$cache_dir/$image_name")
}
# $1: build_name
function mount-overlay() {
local base_name match merged_path build_name
build_name="$1"
base_name="$(basename "$PWD")"
merged_path="$(realpath "${PWD}/../${base_name}-merged-${build_name}")"
match="overlay $merged_path "
# Unmount and remount to pickup new changes
if grep -q "$match" /etc/mtab; then
sudo umount "$merged_path"
fi
mkdir -p "$merged_path"
echo "Creating overlay with coreboot-private"
sudo mount -t overlay overlay \
-o "lowerdir=${FIRMWARE_ROOT}/coreboot-private/:${PWD}" \
"$merged_path"
pushd "$merged_path"
UNMOUNT="$merged_path"
CLEANUP+=("$UNMOUNT")
}
# Ugh, still in the src tree
rm -f .xcompile
util/xcompile/xcompile /opt/coreboot-sdk/bin/ > .xcompile
declare -a FINAL_IMAGES
for config in "${CONFIGS[@]}"; do
build-boot-image "$config"
done
echo
for image in "${FINAL_IMAGES[@]}"; do
echo "*** Built $image ***"
done
if [[ "$do_flash" -eq 1 ]]; then
if [[ "${#FINAL_IMAGES[@]}" -gt 1 ]]; then
echo "Can't flash multiple images onto DUT"
exit 1
else
flash_ap "${FINAL_IMAGES[0]}"
fi
fi
#!/bin/bash -x
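# Kernel bisect helper: rebuilds the ChromeOS kernel and deploys it to
# the DUT at $IP. Exit code 125 tells `git bisect run` to skip the
# commit; 128 aborts the bisect.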
set -e -uo pipefail
# emerge-$BOARD --unmerge sys-kernel/chromeos-kernel-5_10 || true
if ! USE="acpidebug dyndebug" FEATURES="-buildpkg -sandbox -usersandbox" emerge-$BOARD sys-kernel/chromeos-kernel-5_10; then
echo "Failed to build"
exit 125
fi
if ! ~/trunk/src/scripts/update_kernel.sh --board="$BOARD" --remote="$IP" --remote_bootargs --clean; then
echo "Can't deploy; aborting bisect"
exit 128
fi
#!/bin/bash -x
#
# Starts servod and opens up windows for the CPU, EC and CR50 UARTs
#
# Usage:
# Copy the file to ~/workspace/scripts on your host
# $ chmod +x ~/workspace/scripts/cros-dev
# $ ~/workspace/scripts/cros-dev --board=zork
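#
# Any arguments after "--" are forwarded to the servod command line
# inside the chroot.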
set -eu -o pipefail
# Outside the chroot
if [[ ! -f /etc/cros_chroot_version ]]; then
declare OPTIONS=b:c:w:hm:
declare LONGOPTIONS=board:,cros:,workspace:,help,model:
if ! PARSED="$(getopt --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@")"; then
# getopt has complained about wrong arguments to stderr
exit 2
fi
# read getopt’s output this way to handle the quoting right:
eval set -- "$PARSED"
unset PARSED
declare BOARD=""
declare MODEL=""
declare HOST_WORKSPACE="${HOME}/workspace"
declare HOST_CROS_DIR="${HOME}/chromiumos"
declare HOST_BROWSER_DIR=
# now enjoy the options in order and nicely split until we see --
while true; do
case "$1" in
-c|--cros)
HOST_CROS_DIR="$2"
shift 2
;;
-b|--board)
BOARD="$2"
shift 2
;;
-m|--model)
MODEL="$2"
shift 2
;;
-w|--workspace)
HOST_WORKSPACE="$2"
shift 2
;;
-h|--help)
echo "--board <board>"
echo "--cros <path> : defaults to ~/chromiumos"
echo "--workspace <workspace> : defaults to ~/workspace"
exit 0
;;
--)
shift
break
;;
*)
echo "Unhandled args: $*"
exit 3
;;
esac
done
if [[ ! -d "$HOST_CROS_DIR" ]]; then
echo "$HOST_CROS_DIR does not exist. Use --cros <path> to specify your chromiumos directory" >&2
exit 1
fi
if [[ ! -d "$HOST_BROWSER_DIR" ]]; then
if [[ -d "${HOME}/chromium" ]]; then
HOST_BROWSER_DIR="${HOME}/chromium"
fi
fi
mkdir -p "$HOST_WORKSPACE"
mkdir -p "$HOST_WORKSPACE/.tmux"
declare XDG_CONFIG_HOME="$HOST_WORKSPACE/.config"
mkdir -p "$XDG_CONFIG_HOME"
mkdir -p "$XDG_CONFIG_HOME/cros-dev"
# if [[ ! -d "$XDG_CONFIG_HOME/tmux/plugins/tpm" ]]; then
# git clone https://github.com/tmux-plugins/tpm "$XDG_CONFIG_HOME/tmux/plugins/tpm"
# fi
#
# if [[ ! -d "$XDG_CONFIG_HOME/tmux/plugins/tmux-resurrect" ]]; then
# git clone https://github.com/tmux-plugins/tmux-resurrect "$XDG_CONFIG_HOME/tmux/plugins/tmux-resurrect"
# fi
declare SCRIPT_FILENAME
SCRIPT_FILENAME="$(basename "${BASH_SOURCE[0]}")"
CHROOT_CMD=(
"/mnt/host/scripts/${SCRIPT_FILENAME}"
)
if [[ -n "$BOARD" ]]; then
CHROOT_CMD+=(--board "$BOARD")
fi
if [[ -n "$MODEL" ]]; then
CHROOT_CMD+=(--model "$MODEL")
fi
# Bind mount the workspace
echo "$HOST_WORKSPACE /home/$USER/workspace" > "${HOST_CROS_DIR}/src/scripts/.local_mounts"
if [[ -d "$HOST_WORKSPACE/symbols" ]]; then
echo "${HOST_WORKSPACE}/symbols /home/$USER/workspace/symbols" >> "${HOST_CROS_DIR}/src/scripts/.local_mounts"
fi
# local_mounts will bind mount directories so we need to map the directory
# the script is in.
declare HOST_SCRIPT_DIR
HOST_SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
echo "$HOST_SCRIPT_DIR /mnt/host/scripts" >> "${HOST_CROS_DIR}/src/scripts/.local_mounts"
if [[ -e /srv/tftp ]]; then
echo "/srv/tftp /mnt/host/tftp" >> "${HOST_CROS_DIR}/src/scripts/.local_mounts"
fi
if [[ -e ~/ti50 ]]; then
echo "${HOME}/ti50 /home/$USER/ti50" >> "${HOST_CROS_DIR}/src/scripts/.local_mounts"
fi
CROS_SDK_ARGS=(
--nouse-image
)
if [[ -n "$HOST_BROWSER_DIR" ]]; then
CROS_SDK_ARGS+=(
--chrome-root "$HOST_BROWSER_DIR"
)
fi
cd "$HOST_CROS_DIR"
# --no-ns-pid
exec cros_sdk "${CROS_SDK_ARGS[@]}" -- "${CHROOT_CMD[@]}" -- "$@"
fi
# Everything after this is run inside the chroot.
function select-random-port() {
local -i lower_port upper_port current_port
read -r lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range
while :; do
# Pick a random port in the 8191 ports below lower_port (usually 32768)
current_port=$((lower_port - (RANDOM & 0x1FFF)))
# /dev/tcp connects only if something is already listening on the port
if (echo >"/dev/tcp/localhost/$current_port") &>/dev/null; then
echo "Port $current_port is already in use" >&2
continue
else
echo "$current_port"
return 0
fi
done
}
function select-existing-session() {
# TODO: List existing sessions
local -a sessions session_descriptions
local -a session_description
local session
if [[ ! -e "$TMUX_SOCKET" ]]; then
return 0
fi
readarray -t sessions < <(tmux -S "$TMUX_SOCKET" ls -F '#{session_name}')
if [[ "${#sessions[@]}" -eq 0 ]]; then
echo "No existing sessions" >&2
return 0
fi
for session in "${sessions[@]}"; do
local -a tmux_args=(
show-environment -t "$session" \;
#show-environment -t "$session" BOARD \;
#show-environment -t "$session" SERVO_SERIAL \;
#show-environment -t "$session" IP \;
#show-environment -t "$session" EM100_SERIAL \;
)
readarray -t session_description < <(tmux -S "$TMUX_SOCKET" "${tmux_args[@]}" 2>/dev/null)
session_descriptions+=("${session}: ${session_description[*]}")
done
local opt
local PS3='Pick an existing session: '
select opt in "${session_descriptions[@]}" "None" "Quit"
do
if [[ -z "$opt" ]]; then
echo "Invalid option $opt. Try another one."
continue
fi
case $opt in
"None")
return 0
;;
"Quit")
exit 0
;;
*)
exec tmux -S "$TMUX_SOCKET" attach-session -d -t "${sessions[REPLY-1]}"
;;
esac
done
exit 4
# echo "INFO: ${INFO[*]}"
}
function pick-session-name() {
local session_name
read -r -e -p "Enter Session Name: " session_name
if [[ -z "$session_name" ]]; then
echo "Invalid session name"
exit 1
fi
SESSION="$session_name"
}
function setup_env() {
select-existing-session
if [[ -n "${BOARD}" ]]; then
pick-servo
if [[ -z "$EM100_SERIAL" ]]; then
pick-em100
fi
select-ip
fi
if [[ -z "$SESSION" ]]; then
pick-session-name
fi
}
function get-servo-name() {
case "$1" in
"5002")
echo "Servo V2"
return
;;
"5014")
echo "Suzy-Q"
return
;;
"501a")
echo "Servo Micro"
return
;;
"501b")
echo "Servo V4"
return
;;
"5041")
echo "C2D2"
return
;;
"520d")
echo "Servo V4 p1"
return
;;
*) echo "Unknown 0x$1";;
esac
}
function select-ip() {
local default_ip=""
local prompt
local entered_ip
declare -A DEFAULT_IPS
if [[ -e "$XDG_CONFIG_HOME/cros-dev/ip-map" ]]; then
local serial_number ip
while IFS=" " read -r serial_number ip; do
DEFAULT_IPS[$serial_number]="$ip"
done < "$XDG_CONFIG_HOME/cros-dev/ip-map"
fi
if [[ -n "$SERVO_SERIAL" ]]; then
default_ip="${DEFAULT_IPS[$SERVO_SERIAL]+"${DEFAULT_IPS[$SERVO_SERIAL]}"}"
fi
if [[ -z "$default_ip" ]]; then
prompt="DUT IP or 'none': "
else
prompt="DUT IP (${default_ip}) or 'none': "
fi
read -r -e -p "$prompt" entered_ip
IP=
if [[ -n "$entered_ip" ]]; then
IP="$entered_ip"
if [[ -n "$SERVO_SERIAL" ]]; then
DEFAULT_IPS[$SERVO_SERIAL]="$entered_ip"
fi
elif [[ -n "$default_ip" ]]; then
IP="$default_ip"
fi
if [[ "$IP" == "none" ]]; then
IP=""
fi
if [[ -z "$SERVO_SERIAL" ]]; then
return
fi
if [[ -z "$IP" ]]; then
unset "DEFAULT_IPS[$SERVO_SERIAL]"
fi
rm -f "$XDG_CONFIG_HOME/cros-dev/ip-map.tmp"
if [[ "${DEFAULT_IPS[@]+"${#DEFAULT_IPS[@]}"}" -gt 0 ]]; then
for serial_number in "${!DEFAULT_IPS[@]}"
do
ip="${DEFAULT_IPS[$serial_number]}"
echo "$serial_number $ip" >> "$XDG_CONFIG_HOME/cros-dev/ip-map.tmp"
done
mv "$XDG_CONFIG_HOME/cros-dev/ip-map.tmp" "$XDG_CONFIG_HOME/cros-dev/ip-map"
fi
}
function pick-servo() {
local INFO
# Array of Serial\nPID
readarray -t INFO <<<"$(sudo lsusb -d 18d1: -v | sed -E -n \
-e '/^Bus / {s/^Bus [[:digit:]]* Device [[:digit:]]*: ID ([[:xdigit:]]*):([[:xdigit:]]*).*/\2/; h;}' \
-e '/^\s*iSerial/ {s/^\s*iSerial\s*[[:digit:]]+\s*//; G; p }')"
# echo "${#INFO[@]}: ${INFO[*]}"
local -a SERVO_SERIALS
local -a SERVO_OPTIONS
local -a SERVO_TYPES
local i=0
for ((i=2;i<=${#INFO[@]};i=i+2)); do
local PID="${INFO[i-1]}"
# Skip this unknown v2 device
if [[ "$PID" == "5003" ]]; then
continue
fi
SERVO_SERIALS+=("${INFO[i-2]}")
SERVO_OPTIONS+=("$(get-servo-name "$PID") [${INFO[i-2]}]")
SERVO_TYPES+=("$(get-servo-name "$PID")")
done
# echo "options: ${SERVO_OPTIONS[*]+"${SERVO_OPTIONS[*]}"}"
# echo "serials: ${SERVO_SERIALS[*]+"${SERVO_SERIALS[*]}"}"
local opt
local PS3='Pick a servo: '
select opt in "${SERVO_OPTIONS[@]+"${SERVO_OPTIONS[@]}"}" "None" "Quit"
do
if [[ -z "$opt" ]]; then
echo "Invalid option $opt. Try another one."
continue
fi
case $opt in
"None")
break
;;
"Quit")
exit 0
;;
*)
SERVO_SERIAL="${SERVO_SERIALS[REPLY-1]}"
SERVO_TYPE="${SERVO_TYPES[REPLY-1]}"
if [[ -n "${MODEL}" ]]; then
SESSION="$MODEL-$SERVO_SERIAL"
else
SESSION="$BOARD-$SERVO_SERIAL"
fi
break
;;
esac
done
# echo "SERVO SERIAL: $SERVO_SERIAL"
}
function pick-em100() {
if [[ ! -x /usr/sbin/em100 ]]; then
return
fi
readarray -t INFO <<<"$(sudo /usr/sbin/em100 --list-devices)"
# echo "${#INFO[@]}: ${INFO[*]}"
if [[ ${#INFO[@]} -eq 0 ]]; then
return
fi
local -a EM100_DESC
local -a EM100_SERIALS
local i=0
for (( i=0; i<${#INFO[@]}; i=i+1 )); do
local line="${INFO[i]}"
# echo "$line"
if [[ "$line" = "No EM100pro devices found." ]]; then
break
fi
if [[ "$line" =~ ^\ Bus ]]; then
EM100_SERIALS+=("$(rev <<<"$line" | cut -d ' ' -f 1 | rev)")
EM100_DESC+=("$(rev <<<"$line" | cut -d ' ' -f 1-2 | rev)")
else
echo "Unknown line: $line" >&2
fi
done
# echo "options: ${EM100_DESC[*]+"${EM100_DESC[*]}"}"
# echo "serials: ${EM100_SERIALS[*]+"${EM100_SERIALS[*]}"}"
if [[ "${EM100_DESC[@]+"${#EM100_DESC[@]}"}" -eq 0 ]]; then
return
fi
local opt
local PS3='Pick an em100: '
select opt in "${EM100_DESC[@]+"${EM100_DESC[@]}"}" "None" "Quit"
do
if [[ -z "$opt" ]]; then
echo "Invalid option $opt. Try another one."
continue
fi
case $opt in
"None")
break
;;
"Quit")
exit 0
;;
*)
EM100_SERIAL="${EM100_SERIALS[REPLY-1]}"
break
;;
esac
done
# echo "EM100 SERIAL: $EM100_SERIAL"
}
declare OPTIONS=s:r:b:w:i:m:
declare LONGOPTIONS=session:,servo-serial:,board:,shell-only:,workspace:,ip:,model:
if ! PARSED="$(getopt --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@")"; then
# getopt has complained about wrong arguments to stderr
exit 2
fi
# read getopt’s output this way to handle the quoting right:
eval set -- "$PARSED"
unset PARSED
declare CHROOT_SCRIPTS_DIR="/mnt/host/scripts"
declare CHROOT_WORKSPACE="/home/$USER/workspace"
declare SESSION=""
declare SERVO_SERIAL=""
declare SERVO_TYPE=""
declare EM100_SERIAL=""
declare BOARD=""
declare MODEL=""
# now enjoy the options in order and nicely split until we see --
while true; do
case "$1" in
-b|--board)
BOARD="$2"
shift 2
;;
-m|--model)
MODEL="$2"
shift 2
;;
--)
shift
break
;;
*)
echo "Unhandled args: $*"
exit 3
;;
esac
done
declare IP=""
export XDG_CONFIG_HOME="${CHROOT_WORKSPACE}/.config"
declare -a MISSING_PACKAGES
if ! command -v tmux > /dev/null ; then
MISSING_PACKAGES+=('tmux')
fi
if ! command -v picocom > /dev/null; then
MISSING_PACKAGES+=('picocom')
fi
if ! command -v xsel > /dev/null; then
MISSING_PACKAGES+=('x11-misc/xsel')
fi
if ! command -v /usr/sbin/em100 > /dev/null; then
MISSING_PACKAGES+=('sys-apps/em100')
fi
if [[ "${MISSING_PACKAGES[@]+"${#MISSING_PACKAGES[@]}"}" -gt 0 ]]; then
sudo emerge -aj "${MISSING_PACKAGES[@]}"
fi
mkdir -p "${HOME}/.tmux/"
declare TMUX_SOCKET="${HOME}/.tmux/tmux.socket"
setup_env
# Bash config
mkdir -p "$XDG_CONFIG_HOME/bash"
export HISTFILE="$XDG_CONFIG_HOME/bash/history"
export HISTCONTROL=erasedups:ignorespace
export HISTSIZE=5000
echo "export PATH=\"\$PATH:${CHROOT_SCRIPTS_DIR}\"" | \
sudo tee /etc/bash/bashrc.d/workspace.sh > /dev/null
function setup_uart() {
local NAME="$1"
local PTY_COMMAND="$2"
local BAUD_COMMAND="${PTY_COMMAND%_pty}_baudrate"
local BITS_COMMAND="${PTY_COMMAND%_pty}_bits"
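# Look up the UART pty (and baudrate/data bits when servod exposes
# them) with dut-control, then attach picocom to it in a new tmux window.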
if [[ -z "${DUT_CONTROLS["${PTY_COMMAND}"]+"${DUT_CONTROLS["${PTY_COMMAND}"]}"}" ]]; then
return
fi
# if [[ ${DUT_CONTROLS["${BAUD_COMMAND}"]} -ne 1 ]]; then
# BAUD_COMMAND=""
# fi
#
# if [[ "${DUT_CONTROLS["${BITS_COMMAND}"]+"${DUT_CONTROLS["${BITS_COMMAND}"]}"}" -ne 1 ]]; then
# BITS_COMMAND=""
# fi
mkdir -p "${CHROOT_WORKSPACE}/logs"
local cmd
read -r -d '' cmd <<-'EOF' || true # read returns 1 on EOF
NAME="$1"
PTY_COMMAND="$2"
BAUD_COMMAND="$3"
BITS_COMMAND="$4"
ARGS=(--quiet)
PTY="$(dut-control --port="$SERVOD_PORT" "${PTY_COMMAND}" | cut -d : -f 2)"
if [[ -z "$PTY" ]]; then
echo "Failed to find pty for $NAME"
return
fi
if [[ -n "$BAUD_COMMAND" ]]; then
BAUDRATE="$(dut-control --port="$SERVOD_PORT" "${BAUD_COMMAND}" | cut -d : -f 2)"
if [[ "$BAUDRATE" =~ ^[0-9]+$ ]] ; then
ARGS+=(-b "$BAUDRATE")
fi
fi
if [[ -n "${BITS_COMMAND}" ]]; then
BITS="$(dut-control --port="$SERVOD_PORT" "${BITS_COMMAND}" | cut -d : -f 2)"
if [[ "$BITS" == "eight" ]] ; then
ARGS+=(--databits 8)
elif [[ "$BITS" == "seven" ]] ; then
ARGS+=(--databits 7)
fi
fi
# local LOGFILE
# LOGFILE="${CHROOT_WORKSPACE}/logs/${SESSION}-${NAME}-$(date +%F-%T).log"
# ARGS+=(--logfile "$LOGFILE")
# ln -fs "${LOGFILE}" "${CHROOT_WORKSPACE}/logs/${NAME}-latest.log"
exec picocom "${ARGS[@]}" "$PTY"
EOF
tmux -S "${TMUX_SOCKET}" new-window -t "=${SESSION}" -d \
-n "$NAME" bash -x -e -c "$cmd" _ \
"$NAME" "$PTY_COMMAND" "$BAUD_COMMAND" "$BITS_COMMAND" \; \
set-window-option -t "=${SESSION}:=${NAME}" remain-on-exit on \;
}
TMUX_CONFIG=(
set -g @plugin 'tmux-plugins/tmux-resurrect' \;
set -g @resurrect-dir "$XDG_CONFIG_HOME/tmux/resurrect" \;
set-option -g history-limit 100000 \;
set-option -g mouse on \;
# The version of ncurses in the chroot needs to be updated.
# set-option -g default-terminal "tmux-256color" \;
# Gnome terminal does not support OCS 52
set-option -g set-clipboard off \;
set-option -g allow-rename off \;
set-option -g mode-keys vi \;
# set-window-option remain-on-exit on \;
# Don't update the environment because it messes with DISPLAY
set-option -g update-environment '' \;
# unbind-key -T copy-mode MouseDragEnd1Pane \;
# unbind-key -T copy-mode-vi MouseDragEnd1Pane \;
# bind-key -T prefix x confirm-before -p "kill-pane #P? (y/n)"
# 'respawn-pane -k' \;
# emacs bindings
# bind-key -T copy-mode MouseDragEnd1Pane
# send-keys -X copy-pipe "xclip -selection primary -i" \;
# bind-key -T copy-mode M-w
# send-keys -X copy-pipe "xclip -selection clipboard -i" \\\;
# send-keys -X copy-pipe-and-cancel "xclip -selection primary -i" \;
# bind-key -T copy-mode C-w
# send-keys -X copy-pipe "xclip -selection clipboard -i" \\\;
# send-keys -X copy-pipe-and-cancel "xclip -selection primary -i" \;
# Primary clipboard is Middle click
# vim bindings
bind-key -T copy-mode-vi MouseDragEnd1Pane
send-keys -X copy-pipe-no-clear "xsel --primary -i" \;
bind-key -T copy-mode-vi C-j
#send-keys -X copy-pipe "xsel --primary -i" \\\;
send-keys -X copy-pipe-and-cancel "xsel --clipboard -i" \;
bind-key -T copy-mode-vi Enter
#send-keys -X copy-pipe "xsel --primary -i" \\\;
send-keys -X copy-pipe-and-cancel "xsel --clipboard -i" \;
bind-key -T copy-mode-vi y
#send-keys -X copy-pipe "xsel --primary -i" \\\;
send-keys -X copy-pipe-and-cancel "xsel --clipboard -i" \;
# Double LMB Select & Copy (Word)
bind-key -T copy-mode-vi DoubleClick1Pane
select-pane \\\;
send-keys -X select-word \\\;
send-keys -X copy-pipe "xsel --primary -i" \;
bind-key -n DoubleClick1Pane
select-pane \\\;
copy-mode -M \\\;
send-keys -X select-word \\\;
send-keys -X copy-pipe "xsel --primary -i" \;
# Triple LMB Select & Copy (Line)
bind-key -T copy-mode-vi TripleClick1Pane
select-pane \\\;
send-keys -X select-line \\\;
send-keys -X copy-pipe "xsel --primary -i" \;
bind-key -n TripleClick1Pane
select-pane \\\;
copy-mode -M \\\;
send-keys -X select-line \\\;
send-keys -X copy-pipe "xsel --primary -i" \;
bind-key -T copy-mode S-PgUp send-keys -X page-up \;
bind-key -T copy-mode S-PgDn send-keys -X page-down \;
bind-key -n C-l send-keys -R \\\; clear-history \;
bind-key -n S-PgUp copy-mode \;
bind-key P command-prompt -p 'save history to filename:' -I '~/workspace/logs/' 'capture-pane -S - ; save-buffer %1 ; delete-buffer' \;
bind-key C-R respawn-pane \;
# pane-exited
# Not useful because we don't have a .tmux config
# set-environment -g TMUX_PLUGIN_MANAGER_PATH "$XDG_CONFIG_HOME/tmux/plugins"\;
# run-shell -b "$XDG_CONFIG_HOME/tmux/plugins/tpm/tpm"\;
# run-shell "$XDG_CONFIG_HOME/tmux/plugins/tmux-resurrect/resurrect.tmux" \;
)
declare -a TMUX_SESSION_ARGS=(
-s "$SESSION"
# -E
-x "$(tput cols)"
)
declare -a TMUX_POST=()
if [[ -z "$SERVO_SERIAL" ]]; then
MAIN_CMD=(/bin/bash)
RUNNER="exec"
else
# if [[ -d /tmp/servoscratch ]]; then
# sudo rm -rf "/tmp/servoscratch/${SERVOD_PORT}"
# sudo find /tmp/servoscratch/ -xtype l -delete
# fi
TMUX_SESSION_ARGS+=(
-d # Create a detached session
-n servod
)
if ! SERVOD_PORT="$(select-random-port)"; then
echo "Failed to select random servo port" >&2
exit
fi
TMUX_SESSION_ARGS+=(-e "SERVOD_PORT=${SERVOD_PORT}")
if [[ -n "$SERVO_TYPE" ]]; then
TMUX_SESSION_ARGS+=(-e "SERVO_TYPE=$SERVO_TYPE")
fi
MAIN_CMD=(
sudo servod -b "${BOARD/-kernelnext}" # --debug
# --vendor "$(printf "%d\n" 0x18d1)"
# --product "$(printf "%d\n" "$SERVO_PID")"
-s "$SERVO_SERIAL"
--port "$SERVOD_PORT"
)
# Enable USB Keyboard emulation on Servo V4
# if [[ "$SERVO_PID" == "0x501b" ]]; then
# MAIN_CMD+=(--usbkm232=atmega)
# fi
MAIN_CMD+=("$@")
RUNNER="command"
TARGET_SERVOD="=${SESSION}:=servod"
TMUX_POST+=(
set-window-option -t "${TARGET_SERVOD}" remain-on-exit on \;
)
fi
if [[ -n "${BOARD}" ]]; then
TMUX_SESSION_ARGS+=(-e "BOARD=${BOARD}")
fi
if [[ -n "${MODEL}" ]]; then
TMUX_SESSION_ARGS+=(-e "MODEL=${MODEL}")
fi
if [[ -n "${IP}" ]]; then
TMUX_SESSION_ARGS+=(-e "IP=${IP}")
fi
if [[ -n "${EM100_SERIAL}" ]]; then
TMUX_SESSION_ARGS+=(-e "EM100_SERIAL=${EM100_SERIAL}")
fi
# Servod can leave stale state which breaks it
sudo servodutil rebuild
# Create session
"$RUNNER" tmux -S "${TMUX_SOCKET}" \
"${TMUX_CONFIG[@]}" \
new-session "${TMUX_SESSION_ARGS[@]}" \
"${MAIN_CMD[@]}" \; \
"${TMUX_POST[@]+"${TMUX_POST[@]}"}"
function check-servod() {
if [[ "$(tmux -S "${TMUX_SOCKET}" list-panes -t "${TARGET_SERVOD}" -F "#{pane_dead}")" != "0" ]]; then
echo "servod failed to start"
# Attach to show the error message
echo "${MAIN_CMD[@]}"
tmux -S "${TMUX_SOCKET}" capture-pane -p -e -S- -E- -t "${TARGET_SERVOD}.0"
tmux -S "${TMUX_SOCKET}" kill-session -t "=${SESSION}"
exit 1
fi
tmux -S "${TMUX_SOCKET}" capture-pane -p -e -S- -E- -t "${TARGET_SERVOD}.0"
}
function update-servod-port() {
SERVOD_PORT="$(
tmux -S "${TMUX_SOCKET}" \
capture-pane -p -e -S- -E- -t "${TARGET_SERVOD}.0" | \
sed -n -E 's/^.* - Listening on ([a-zA-Z0-9_]*) port ([0-9]*)/\2/p'
)"
}
SERVOD_PORT=""
echo "Waiting for servod to start"
while [[ -z "$SERVOD_PORT" ]]; do
check-servod
update-servod-port
sleep 0.5
done
check-servod
echo "servod started on port $SERVOD_PORT"
declare -A DUT_CONTROLS
while read -r key; do
DUT_CONTROLS[$key]=1
done < <(dut-control --port "$SERVOD_PORT" -i | \
sed -E -n -e '/\* MAP/q' -e 's/([a-zA-Z0-9_]+) .+/\1/p')
CONSOLES=()
for control in "${!DUT_CONTROLS[@]}" ; do
if [[ $control =~ _uart_pty$ ]]; then
CONSOLES+=("$control")
fi
done
echo "Available consoles: " "${CONSOLES[@]}"
setup_uart cpu cpu_uart_pty
setup_uart ec ec_uart_pty
setup_uart cr50 cr50_uart_pty
setup_uart servo_v4 servo_v4_console_pty
check-servod
tmux -S "${TMUX_SOCKET}" new-window -t "$SESSION" -d -n ssh
tmux -S "${TMUX_SOCKET}" new-window -t "$SESSION" -d -n coreboot -e \
CHROOT_CWD=~/trunk/src/third_party/coreboot
tmux -S "${TMUX_SOCKET}" new-window -t "$SESSION" -d -n ec-dev -e \
CHROOT_CWD=~/trunk/src/platform/ec
tmux -S "${TMUX_SOCKET}" new-window -t "$SESSION" -d -n kernel-dev -e \
CHROOT_CWD=~/trunk/src/third_party/kernel
tmux -S "${TMUX_SOCKET}" new-window -t "$SESSION" -d
# Need to re-think the init process
exec tmux -S "${TMUX_SOCKET}" attach -t "$SESSION"
#!/bin/bash
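# scp wrapper that authenticates with the shared ChromeOS testing key.
# Example (hypothetical paths): dut-scp /tmp/foo "root@${IP}:/tmp/"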
IDENTITY="$(mktemp)"
cat /mnt/host/source/src/third_party/chromiumos-overlay/chromeos-base/chromeos-ssh-testkeys/files/testing_rsa > "$IDENTITY"
# Identity needs 400
chmod 400 "$IDENTITY"
exec scp -o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
-o CheckHostIP=no \
-i "$IDENTITY" \
"$@"
#!/bin/bash
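# ssh wrapper that logs in as root with the shared ChromeOS testing key.
# Example: dut-ssh "$IP" uname -a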
IDENTITY="$(mktemp)"
cat /mnt/host/source/src/third_party/chromiumos-overlay/chromeos-base/chromeos-ssh-testkeys/files/testing_rsa > "$IDENTITY"
# Identity needs 400
chmod 400 "$IDENTITY"
exec ssh -o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
-o CheckHostIP=no \
-i "$IDENTITY" \
-l root \
"$@"
#!/bin/bash -x
set -e
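# Flashes an AP firmware image to the DUT, trying the fastest method
# first: an attached em100 emulator, then ssh (if the AP is up), and
# finally the servo SPI interface.
# Usage: flash_ap [image.bin]; defaults to the image for $BOARD/$MODEL.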
USE_FLASHROM=0
declare -a DUR_CONTROL_CLEANUP
function cleanup {
local rc=$?
echo "Exit code: $rc"
if [[ ${#DUR_CONTROL_CLEANUP[@]} -eq 0 ]]; then
return
fi
dut-control "${DUR_CONTROL_CLEANUP[@]}"
DUR_CONTROL_CLEANUP=()
}
trap cleanup EXIT
function try-em100() {
if [[ -n "$EM100_SERIAL" && -f /usr/sbin/em100 ]] && sudo /usr/sbin/em100 -l | grep -q "$EM100_SERIAL"; then
dut-control cold_reset:on
dut-control servo_present:on || true
sudo /usr/sbin/em100 --device "$EM100_SERIAL" --stop -c W25Q128FW --download "$IMG" --holdpin LOW --start
# dut-control spi_hold:on
dut-control cold_reset:off
exit 0
fi
dut-control spi_hold:off || true
dut-control servo_present:off || true
}
function try-ssh() {
local IP="$1"
if [[ -z "$IP" ]]; then
return
fi
local IDENTITY
IDENTITY="$(mktemp)"
cat /mnt/host/source/src/third_party/chromiumos-overlay/chromeos-base/chromeos-ssh-testkeys/files/testing_rsa > "$IDENTITY"
# Identity needs 400
chmod 400 "$IDENTITY"
local SSH_ARGS=(
-o "ConnectTimeout=5"
-o "UserKnownHostsFile=/dev/null"
-o "StrictHostKeyChecking=no"
-o "CheckHostIP=no"
-o "BatchMode=yes"
-i "$IDENTITY"
"root@$IP"
)
local script
if [[ $USE_FLASHROM -eq 1 ]]; then
script='flashrom --fast-verify -p host -w - && reboot'
else
script='futility update --force --fast --image - && reboot'
fi
local SUCCESS=0
# If the AP is up and ssh is available, scp the rom image over since it's faster
if ssh "${SSH_ARGS[@]}" "$script" < "$IMG"; then
SUCCESS=1
fi
rm -rf "$IDENTITY"
if [[ $SUCCESS -eq 1 ]]; then
exit 0
fi
}
function get-child-servo-serial() {
local ROOT_SERVO_SERIAL="$1"
local SERVOD_INFO
SERVOD_INFO="$(servodutil show -s "$ROOT_SERVO_SERIAL")"
python - "$SERVOD_INFO" "$ROOT_SERVO_SERIAL" <<'EOF'
import ast
import re
import sys

info = sys.argv[1]
root_servo_serial = sys.argv[2]
kv = dict(re.findall(r"^([^:]+) : (.+)", info, re.MULTILINE))
if "serials" not in kv:
    print(root_servo_serial)
    sys.exit(0)
# servodutil prints "serials" as a Python list literal; parse it safely.
serials = ast.literal_eval(kv["serials"])
serials.remove(root_servo_serial)
if not serials:
    print(root_servo_serial)
    sys.exit(0)
elif len(serials) > 1:
    sys.stderr.write("Multiple child servos active: %s\n" % kv["serials"])
    sys.exit(1)
else:
    print(serials[0])
    sys.exit(0)
EOF
}
function get-servo-serial() {
local SERVO_TYPE="$1"
local ROOT_SERVO_SERIAL
ROOT_SERVO_SERIAL="$(dut-control serialname | cut -d : -f 2)"
if [[ "$SERVO_TYPE" = "servo_type:servo_v2" || \
"$SERVO_TYPE" = "servo_type:ccd_cr50" || \
"$SERVO_TYPE" = "servo_type:servo_micro" ]]; then
echo "$ROOT_SERVO_SERIAL"
elif [[ "$SERVO_TYPE" = "servo_type:servo_v4_with_servo_micro" || \
"$SERVO_TYPE" = "servo_type:servo_v4_with_ccd_cr50" ]]; then
get-child-servo-serial "$ROOT_SERVO_SERIAL"
else
echo "$SERVO_TYPE is not supported!" 1>&2
exit 1
fi
}
function try-servo() {
local SERVO_TYPE SERVO_SERIAL
SERVO_TYPE="$(dut-control servo_type)"
SERVO_SERIAL="$(get-servo-serial "$SERVO_TYPE")"
if [[ "$SERVO_TYPE" = "servo_type:servo_v2" ]]; then
DUR_CONTROL_CLEANUP+=(spi2_buf_en:off spi2_buf_on_flex_en:off spi2_vref:off servo_present:off cold_reset:off)
dut-control spi2_buf_en:on spi2_buf_on_flex_en:on spi2_vref:pp1800 cold_reset:on servo_present:on
if [[ $USE_FLASHROM -eq 1 ]]; then
sudo /usr/sbin/flashrom -w "$IMG" --fast-verify \
-p "ft2232_spi:type=servo-v2,serial=$SERVO_SERIAL"
else
sudo /usr/bin/futility update -d -v --wp 0 --force --fast --image "$IMG" \
-p "ft2232_spi:type=servo-v2,serial=$SERVO_SERIAL" --factory
fi
elif [[ "$SERVO_TYPE" = "servo_type:ccd_cr50" || "$SERVO_TYPE" = "servo_type:servo_v4_with_ccd_cr50" ]]; then
DUR_CONTROL_CLEANUP+=(warm_reset:off)
dut-control warm_reset:on
echo "This is going to take a while, be patient."
sudo /usr/bin/futility update --wp 0 --force --fast --image "$IMG" \
-p "raiden_debug_spi:serial=$SERVO_SERIAL" -d
elif [[ "$SERVO_TYPE" = "servo_type:servo_micro" || "$SERVO_TYPE" = "servo_type:servo_v4_with_servo_micro" ]]; then
DUR_CONTROL_CLEANUP+=(servo_present:off spi2_vref:off spi2_buf_en:off cold_reset:off)
dut-control cold_reset:on spi2_vref:pp1800 spi2_buf_en:on servo_present:on
if [[ $USE_FLASHROM -eq 1 ]]; then
sudo /usr/sbin/flashrom -w "$IMG" \
-p "raiden_debug_spi:serial=$SERVO_SERIAL" --fast-verify
else
sudo /usr/bin/futility update --wp 0 --force --fast --image "$IMG" \
-p "raiden_debug_spi:serial=$SERVO_SERIAL"
fi
else
echo "$SERVO_TYPE is not supported!"
exit 1
fi
DUR_CONTROL_CLEANUP+=(sleep:1 pwr_button_hold:10)
cleanup
}
IMG="${1:-/build/${BOARD}/firmware/image-${MODEL:-$BOARD}.serial.bin}"
if [[ ! -r "$IMG" ]]; then
echo "$IMG does not exist" 1>&2
exit 1
fi
echo "Flashing $IMG"
if [[ "$BOARD" == "zork" ]]; then
USE_FLASHROM=1
fi
try-em100
try-ssh "${IP}"
try-servo
#!/bin/bash -x
set -eu -o pipefail
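# Pulls /sys/firmware/acpi/tables from the DUT at $IP and disassembles
# them with iasl (SSDTs are passed to -e so external references resolve).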
DEST=$HOME/workspace/tmp/acpi-tables
# make sure we don't delete our own directory
cd /
rm -rf "$DEST"
dut-scp -r "root@${IP}:/sys/firmware/acpi/tables/" "$DEST"
pushd "$DEST"
iasl -e SSDT* -d DSDT
iasl -e DSDT -d SSDT*
iasl -d FACP
iasl -d HPET
if [[ -e SPCR ]]; then
iasl -d SPCR
fi
if [[ -e SPCR1 ]]; then
iasl -d SPCR1
fi
if [[ -e SPCR2 ]]; then
iasl -d SPCR2
fi
if [[ -e DBG2 ]]; then
iasl -d DBG2
fi
#!/bin/bash
set -eu
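# Fetches the canary-channel firmware archive for --board from
# gs://chromeos-releases (latest version unless --version is given) and
# extracts it under /tmp. Use --list to browse available versions.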
OPTIONS=lv:b:
LONGOPTIONS=list,version:,board:
# - temporarily store output to be able to check for errors
# - e.g. use "--options" parameter by name to activate quoting/enhanced mode
# - pass arguments only via -- "$@" to separate them correctly
if ! PARSED="$(getopt --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@")"; then
# getopt has complained about wrong arguments to stderr
exit 2
fi
# read getopt’s output this way to handle the quoting right:
eval set -- "$PARSED"
LIST_ONLY=0
VERSION=""
# now enjoy the options in order and nicely split until we see --
while true; do
case "$1" in
-b|--board)
BOARD="$2"
shift 2
;;
-l|--list)
LIST_ONLY=1
shift 1
;;
-v|--version)
VERSION="$2"
shift 2
;;
--)
shift
break
;;
*)
echo "Unhandled args: $*"
exit 3
;;
esac
done
if [[ -z "$BOARD" ]]; then
echo "--board <board> is required"
exit 2
fi
if [[ $LIST_ONLY -eq 1 ]]; then
gsutil ls "gs://chromeos-releases/canary-channel/${BOARD}/" | sort | less
exit 0
fi
if [[ -n "$VERSION" ]]; then
LATEST_FW="gs://chromeos-releases/canary-channel/${BOARD}/${VERSION}/ChromeOS-firmware-*"
else
LATEST_FW_PATH="$(gsutil ls "gs://chromeos-releases/canary-channel/${BOARD}/" | sort | tail -n 1)"
if [[ -z "$LATEST_FW_PATH" ]]; then
echo "No firmware found for board $BOARD" >&2
exit 1
fi
VERSION="$(basename "${LATEST_FW_PATH}")"
LATEST_FW="${LATEST_FW_PATH}ChromeOS-firmware-*"
fi
FWTMP="$(mktemp -d)"
gsutil cp "$LATEST_FW" "$FWTMP/"
DESTDIR="/tmp/chromeos-firmware-$BOARD-$VERSION"
if [[ -e "$DESTDIR" ]]; then
rm -rf "$DESTDIR"
fi
mkdir -p "$DESTDIR"
tar -C "$DESTDIR" -xvf "${FWTMP}/"*
rm -r "$FWTMP"
echo "Firmware located at ${DESTDIR}"
#!/bin/bash -x
# Installation: Put this script somewhere in your $PATH.
#
# Usage: git push-repo <-- Yes... git<space>push-repo
#
# This script helps in curating the commits you push to gerrit.
#
# The script does the following:
# * Clones the branch you are currently working on.
# * Does an interactive rebase (unless --no-interactive is passed).
# This allows you to pick which CLs you want to push. It's very useful
# for breaking up large patch trains into smaller pieces.
# * Once the interactive rebase is done, the remaining patches are rebased
# onto the upstream branch. If you want to stack your CLs onto an already
# existing patch that is not part of your patch train use the
# `--onto <hash>` option.
# * If the rebase is successful `repo upload` will be run. If you encounter
# pre-submit verification errors you can use --no-verify.
#
# Notes:
# * If you need to update a patch in the middle of a patch train, make sure
# when you push, you also push the same parents. This will ensure the
# parent hashes stay the same.
set -eu -o pipefail
OPTIONS=inswru:
LONGOPTIONS=no-interactive,dry-run,stay-on-upstream,wip,ready,no-verify,onto:,reviewers:,verified,cq:
if ! PARSED="$(getopt --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@")"; then
exit 2
fi
# read getopt’s output this way to handle the quoting right:
eval set -- "$PARSED"
TEMP_BRANCH=
DRY_RUN=0
STAY_ON_UPSTREAM=0
INTERACTIVE=1
WIP=0
READY=0
VERIFY=1
REBASE_TARGET=
REVIEWERS=
# CQ Verified
VERIFIED=0
CQ=
# now enjoy the options in order and nicely split until we see --
while [[ $# -gt 0 ]]; do
case "$1" in
-d|--no-interactive)
INTERACTIVE=0
shift 1
;;
-n|--dry-run)
DRY_RUN=1
shift 1
;;
-s|--stay-on-upstream)
STAY_ON_UPSTREAM=1
shift 1
;;
-w|--wip)
WIP=1
shift 1
;;
-r|--ready)
READY=1
shift 1
;;
--no-verify)
VERIFY=0
shift 1
;;
--reviewers)
REVIEWERS="$2"
shift 2
;;
--onto)
REBASE_TARGET="$2"
shift 2
;;
--verified)
VERIFIED=1
shift 1
;;
--cq)
CQ="$2"
shift 2
;;
--help)
echo "Usage: $0 [-d|--no-interactive] [-n|--dry-run] [-s|--stay-on-upstream] [-w|--wip] [-r|--ready] [--no-verify] [--reviewers=] [--onto] [--verified] [--cq]"
exit 0
;;
--)
shift
break
;;
*)
echo "Unhandled args: $*"
exit 3
;;
esac
done
TOP_LEVEL="$(git rev-parse --show-toplevel)"
if [[ -z "$TOP_LEVEL" ]]; then
echo "Not in a git repo"
exit 1
fi
cd "$TOP_LEVEL"
CURRENT_BRANCH="$(git symbolic-ref --short HEAD)"
if [[ -z "$CURRENT_BRANCH" ]]; then
echo "Not on a branch"
exit 1
fi
function finish {
if [[ $STAY_ON_UPSTREAM -eq 0 ]]; then
REBASE_PATHS=(rebase-apply rebase-merge)
for REBASE_PATH in "${REBASE_PATHS[@]}"; do
REBASE_PATH="$(git rev-parse --git-path "${REBASE_PATH}")"
if [[ -d "$REBASE_PATH" ]]; then
echo "Rebase never completed, aborting"
git rebase --abort
fi
done
git checkout -q "${CURRENT_BRANCH}"
if [[ -n "$TEMP_BRANCH" ]]; then
git branch -D "$TEMP_BRANCH"
fi
else
echo "To switch to the previous branch run:"
echo " git checkout $CURRENT_BRANCH"
if [[ -n "$TEMP_BRANCH" ]]; then
echo "git branch -D $TEMP_BRANCH"
fi
fi
}
trap finish EXIT
if ! UPSTREAM_REF="$(git rev-parse --symbolic-full-name "@{u}")"; then
echo "Missing remote for branch" >&2
exit 1
fi
if ! git diff-index --ignore-submodules --quiet HEAD --; then
echo "You have unstaged changes."
echo "Please commit or stash them."
exit 1
fi
# We use a temp branch because repo upload requires a tracking branch
TEMP_BRANCH_NAME="repo-${CURRENT_BRANCH}-$(git rev-parse --short HEAD)"
if ! git branch --copy --force "$CURRENT_BRANCH" "$TEMP_BRANCH_NAME"; then
echo "Failed to copy branch";
exit 1;
fi
if ! git checkout "$TEMP_BRANCH_NAME"; then
echo "Failed to switch to $TEMP_BRANCH_NAME";
exit 1;
fi
TEMP_BRANCH="$TEMP_BRANCH_NAME"
if [[ -z "$REBASE_TARGET" ]]; then
REBASE_TARGET="@{upstream}"
fi
if [[ $INTERACTIVE -eq 1 ]]; then
if ! git rebase -i --onto "${REBASE_TARGET}"; then
echo "Interactive rebase onto ${REBASE_TARGET} failed."
exit 1
fi
# Interactive rebase does not support the --committer-date-is-author-date flag
# so we need to manually clean it up.
export FILTER_BRANCH_SQUELCH_WARNING=1
if ! git filter-branch -f \
--env-filter 'export GIT_COMMITTER_DATE="${GIT_AUTHOR_DATE}"' \
"$REBASE_TARGET"..; then
echo "Commit date cleanup failed!"
exit 1
fi
else
if ! git rebase --committer-date-is-author-date \
--onto "${REBASE_TARGET}"; then
echo "Rebase onto ${REBASE_TARGET} failed."
# I think we need to stop the git command
# (kill -SIGTSTP "$$")
exit 1
fi
fi
echo "Rebased commit: $(git rev-parse --short --verify HEAD)"
declare -a REPO_OPTS
# Current branch
REPO_OPTS+=(--cbr)
if [[ $VERIFY -eq 1 ]]; then
REPO_OPTS+=("--verify")
else
REPO_OPTS+=("--no-verify")
fi
if [[ $DRY_RUN -eq 1 ]]; then
REPO_OPTS+=("--dry-run")
fi
if [[ -n "${REVIEWERS}" ]]; then
REPO_OPTS+=("--reviewers=${REVIEWERS}")
fi
if [[ $WIP -eq 1 ]]; then
REPO_OPTS+=("--wip")
fi
if [[ "${VERIFIED}" -eq 1 ]]; then
REPO_OPTS+=("-l" "Verified+1")
fi
if [[ -n "${CQ}" ]]; then
REPO_OPTS+=("-l" "Commit-Queue+${CQ}")
fi
if ! repo upload . "${REPO_OPTS[@]}"; then
echo " Failed to push!"
exit 1
fi
#!/bin/bash
# Installation: Put this script somewhere in your $PATH.
#
# Usage: git push-upstream <-- Yes... git<space>push-upstream
#
# This script helps in curating the commits you push to gerrit.
#
# The script does the following:
# * Clones the branch you are currently working on.
# * Does an interactive rebase (unless --no-interactive is passed).
# This allows you to pick which CLs you want to push. It's very useful
# for breaking up large patch trains into smaller pieces.
# * Once the interactive rebase is done, the remaining patches are rebased
# onto origin/master or the remote/branch specified by --remote and
# --target-branch. If you want to stack your CLs onto an already existing
# patch that is not part of your patch train use the `--onto <hash>`
# option.
# * If the rebase is successful `checkpatch.pl` will be run. You can skip this
# with --no-verify.
# * If verification is successful, the changes are pushed.
#
# Notes:
# * If you need to update a patch in the middle of a patch train, make sure
# when you push, you also push the same parents. This will ensure the
# parent hashes stay the same.
# * This workflow supports cros repo pushing. i.e., if you normally work on
# the ChromeOS coreboot tree, you can use this script to push to the
# upstream coreboot repo without leaving the comfort of your working tree.
# Say goodbye to dealing with multiple checkouts.
# * There is nothing really coreboot specific about this script. It should
# work with any gerrit instance.
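#
# Example invocations (hypothetical remote/branch/hash values):
# $ git push-upstream # rebase onto origin/master, verify, push
# $ git push-upstream --remote upstream --target-branch main --wip
# $ git push-upstream --no-verify --onto <hash>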
set -eu -o pipefail
OPTIONS=inswru:
LONGOPTIONS=no-interactive,dry-run,stay-on-upstream,wip,ready,upstream-branch:,no-verify,onto:,target-branch:,remote:
if ! PARSED="$(getopt --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@")"; then
exit 2
fi
# read getopt’s output this way to handle the quoting right:
eval set -- "$PARSED"
TEMP_BRANCH=
DRY_RUN=0
STAY_ON_UPSTREAM=0
INTERACTIVE=1
WIP=0
READY=0
VERIFY=1
REBASE_TARGET=
REMOTE=origin
TARGET_BRANCH="master"
# now enjoy the options in order and nicely split until we see --
while [[ $# -gt 0 ]]; do
case "$1" in
-d|--no-interactive)
INTERACTIVE=0
shift 1
;;
-n|--dry-run)
DRY_RUN=1
shift 1
;;
-s|--stay-on-upstream)
STAY_ON_UPSTREAM=1
shift 1
;;
-w|--wip)
WIP=1
shift 1
;;
-r|--ready)
READY=1
shift 1
;;
--no-verify)
VERIFY=0
shift 1
;;
--onto)
REBASE_TARGET="$2"
shift 2
;;
--target-branch)
TARGET_BRANCH="$2"
shift 2
;;
--remote)
REMOTE="$2"
shift 2
;;
--)
shift
break
;;
*)
echo "Unhandled args: $*"
exit 3
;;
esac
done
TOP_LEVEL="$(git rev-parse --show-toplevel)"
if [[ -z "$TOP_LEVEL" ]]; then
echo "Not in a git repo"
exit 1
fi
cd "$TOP_LEVEL"
CURRENT_BRANCH="$(git symbolic-ref --short HEAD)"
if [[ -z "$CURRENT_BRANCH" ]]; then
echo "Not on a branch"
exit 1
fi
function finish {
if [[ $STAY_ON_UPSTREAM -eq 0 ]]; then
REBASE_PATHS=(rebase-apply rebase-merge)
for REBASE_PATH in "${REBASE_PATHS[@]}"; do
REBASE_PATH="$(git rev-parse --git-path "${REBASE_PATH}")"
if [[ -d "$REBASE_PATH" ]]; then
echo "Rebase never completed, aborting"
git rebase --abort
fi
done
git checkout -q "${CURRENT_BRANCH}"
if [[ -n "$TEMP_BRANCH" ]]; then
git branch -D "$TEMP_BRANCH"
fi
else
echo "To switch to the previous branch run:"
echo " git checkout $CURRENT_BRANCH"
if [[ -n "$TEMP_BRANCH" ]]; then
echo "git branch -D $TEMP_BRANCH"
fi
fi
}
trap finish EXIT
if ! git config "remote.$REMOTE.url"; then
echo "Missing origin remote"
echo "Please add the origin"
echo "git remote add origin https://review.coreboot.org/coreboot.git && git fetch origin"
exit 1
fi
UPSTREAM_BRANCH=$REMOTE/$TARGET_BRANCH
if ! git rev-parse "$UPSTREAM_BRANCH" &> /dev/null; then
echo "Unknown branch $UPSTREAM_BRANCH"
echo "Did you forget to \`git fetch $REMOTE\`?"
exit 1
fi
if ! git diff-index --ignore-submodules --quiet HEAD --; then
echo "You have unstaged changes."
echo "Please commit or stash them."
exit 1
fi
TEMP_BRANCH_NAME="upstream-${CURRENT_BRANCH}-$(git rev-parse --short HEAD)"
if ! git branch --copy --force "$CURRENT_BRANCH" "$TEMP_BRANCH_NAME"; then
echo "Failed to copy branch";
exit 1;
fi
if ! git checkout "$TEMP_BRANCH_NAME"; then
echo "Failed to switch to $TEMP_BRANCH_NAME";
exit 1;
fi
TEMP_BRANCH="$TEMP_BRANCH_NAME"
if [[ -z "$REBASE_TARGET" ]]; then
REBASE_TARGET="$UPSTREAM_BRANCH"
fi
if [[ $INTERACTIVE -eq 1 ]]; then
if ! git rebase -i --onto "${REBASE_TARGET}"; then
echo "Interactive rebase onto ${UPSTREAM_BRANCH} failed."
exit 1
fi
# Interactive rebase does not support the --committer-date-is-author-date flag
# so we need to manually clean it up.
export FILTER_BRANCH_SQUELCH_WARNING=1
if ! git filter-branch -f \
--env-filter 'export GIT_COMMITTER_DATE="${GIT_AUTHOR_DATE}"' \
"@{upstream}"..; then
echo "Commit date cleanup failed!"
exit 1
fi
else
if ! git rebase --committer-date-is-author-date \
--onto "${REBASE_TARGET}"; then
echo "Rebase onto ${UPSTREAM_BRANCH} failed."
# I think we need to stop the git command
# (kill -SIGTSTP "$$")
exit 1
fi
fi
echo "Rebased commit: $(git rev-parse --short --verify HEAD)"
if [[ $VERIFY -eq 1 && -e ./util/lint/checkpatch.pl ]]; then
if ! ./util/lint/checkpatch.pl -g '@{u}..' --max-line-length 96; then
PS3="checkpatch failed, continue uploading? "
select OPT in "Yes" "No"
do
if [[ -z "$OPT" ]]; then
echo "Invalid option $OPT. Try another one."
continue
fi
break
done
if [[ "$OPT" != "Yes" ]]; then
exit 1
fi
fi
fi
if [[ $DRY_RUN -eq 0 ]]; then
GERRIT_OPTS=""
if [[ $WIP -eq 1 ]]; then
GERRIT_OPTS="%wip"
fi
if [[ $READY -eq 1 ]]; then
GERRIT_OPTS="%ready"
fi
if ! git push "$REMOTE" "$TEMP_BRANCH:refs/for/$TARGET_BRANCH${GERRIT_OPTS}"; then
echo " Failed to push!"
exit 1
fi
fi
#!/bin/bash
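# Runs an autotest repeatedly while sampling power rails via dut-control,
# optionally collecting top, perf, turbostat, and register dumps over
# ssh. Results land under ~/workspace/tmp/.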
CONTRIB_DIR=/mnt/host/source/src/platform/dev/contrib
# shellcheck source=/usr/local/google/home/rrangel/chromiumos/src/platform/dev/contrib/common.sh
source "$CONTRIB_DIR/common.sh"
DEFINE_integer 'iterations' '1' 'number of iterations to run' 'i'
DEFINE_string 'test' '' 'autotest to run' 't'
DEFINE_string 'ip' "$IP" 'IP of the dut' ''
DEFINE_boolean 'profile' false 'Enable prof.data generation' ''
DEFINE_boolean 'top' false 'Enable top generation' ''
DEFINE_boolean 'turbostat' false 'Enable turbostat generation' ''
DEFINE_boolean 'regs' false 'Enable register dumps' ''
DEFINE_string 'args' '' 'Additional args to pass to the test' ''
FLAGS_HELP="USAGE: $0 [flags]"
# Parse the command-line.
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -xe
die() {
[ $# -gt 0 ] && echo "error: $*"
set +xe
flags_help
exit 1
}
if [[ -z "$FLAGS_test" ]]; then
die "--test is required"
fi
if [[ -z "$FLAGS_ip" ]]; then
die "--ip is required"
fi
#IP=100.107.108.232
INTERVAL_MS=100
TEST="$FLAGS_test"
#TEST=power_LoadTest.WIRED_1hr_acok
#TEST=power_Idle
ITERATIONS="${FLAGS_iterations}"
IP="${FLAGS_ip}"
RESULTS_DIR="$(mktemp --tmpdir="$HOME/workspace/tmp" -d "$BOARD-$(date +%Y-%m-%d)-$TEST.XXXX")"
exec > >(tee -i "$RESULTS_DIR/log") 2>&1
IDENTITY="${RESULTS_DIR}/.id_rsa"
cat /mnt/host/source/src/third_party/chromiumos-overlay/chromeos-base/chromeos-ssh-testkeys/files/testing_rsa > "$IDENTITY"
# Identity needs 400
chmod 400 "$IDENTITY"
CONTROL_MASTER="${RESULTS_DIR}/.master"
SSH_ARGS=(
-o "ConnectTimeout=5"
-o "UserKnownHostsFile=/dev/null"
-o "StrictHostKeyChecking=no"
-o "CheckHostIP=no"
-o "BatchMode=yes"
-i "$IDENTITY"
-S "$CONTROL_MASTER"
"root@$IP"
)
declare -a PIDS
function finish-iteration {
for pid in "${PIDS[@]}"
do
kill "$pid" || true
wait "$pid" || true
done
PIDS=()
}
function finish {
finish-iteration
if [[ -e $CONTROL_MASTER ]]; then
ssh -O exit "${SSH_ARGS[@]}"
fi
if [[ -e $IDENTITY ]]; then
rm -rf "$IDENTITY"
fi
}
trap finish EXIT
function ctrl_c() {
echo "Stopping iteration"
STOP=1
}
trap ctrl_c INT
function fix-routing-table() {
ssh "${SSH_ARGS[@]}" bash - <<'EOF'
ROUTE="$(route -4)"
echo "Current routing table:"
echo "$ROUTE"
if grep default <<< "$ROUTE" | grep -q eth0; then
echo "Found eth0 as default"
if grep default <<< "$ROUTE" | grep -q wlan0; then
echo "Setting wlan0 as default gateway"
route add -net 100.67.80.0/23 gw 100.107.108.254 dev eth0
route del default gw 100.107.108.254
echo "New routing table:"
route -4
else
echo "No wifi connection was found, leaving eth0 as default"
fi
fi
true
EOF
}
mapfile -t CONTROLS < <(\
dut-control -i | cut -d' ' -f1 | grep '_mw' | grep -v vbat | sort)
echo "Starting control master"
ssh "${SSH_ARGS[@]}" -M -o "ControlPersist=10" -f -N
echo "Results will be placed in $RESULTS_DIR"
ITERATION="0"
STOP=0
while [ "$ITERATION" -lt "$ITERATIONS" ]
do
if [[ $STOP -ne 0 ]]; then
break
fi
echo "Starting iteration ${ITERATION}"
fix-routing-table
# Poke servod to see if the USB connection is active
if ! dut-control "${CONTROLS[0]}" > /dev/null; then
echo "Servod is acting up, waiting a few seconds"
sleep 5
fi
(
echo "# date: $(date -Iseconds)"
exec dut-control --debug --gnuplot --repeat 360000 \
--sleep_msecs "${INTERVAL_MS}" \
"${CONTROLS[@]}"
) > "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION.tsv" &
PIDS+=("$!")
if [[ ${FLAGS_top} -eq ${FLAGS_TRUE} ]]; then
ssh "${SSH_ARGS[@]}" "top -b -c -w 200 -d 5 -o '%CPU' -H | awk '\$1 ~ /[0-9]+/ && \$9 == '0.0' {next} {print}'" \
> "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION-top.txt" &
PIDS+=("$!")
fi
if [[ ${FLAGS_profile} -eq ${FLAGS_TRUE} ]]; then
ssh -ntt "${SSH_ARGS[@]}" perf record -a --output /dev/stdout \
> "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION-perf.data" &
PIDS+=("$!")
fi
if [[ ${FLAGS_turbostat} -eq ${FLAGS_TRUE} ]]; then
ssh -n "${SSH_ARGS[@]}" turbostat \
> "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION-turbostat.txt" &
PIDS+=("$!")
fi
if [[ ${FLAGS_regs} -eq ${FLAGS_TRUE} && -e "$HOME/workspace/scripts/reg-dump" ]]; then
ssh "${SSH_ARGS[@]}" bash - < "$HOME/workspace/scripts/reg-dump" \
> "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION-regs.txt" &
PIDS+=("$!")
fi
test_that --iterations=1 --board="$BOARD" \
--fast \
--debug \
--autotest_dir "${SRC_ROOT}/third_party/autotest/files/" \
--results_dir "${RESULTS_DIR}/$BOARD-$TEST-$ITERATION" \
--args="servo_host=localhost servo_port=$SERVOD_PORT servo_board=$BOARD ${FLAGS_args}" \
"${IP}" "${TEST}" || true
echo "Iteration ${ITERATION} Finished, stopping metrics"
# Sometimes the server won't die!
ssh "${SSH_ARGS[@]}" 'killall autotest; killall top; killall perf' || true
finish-iteration
ITERATION=$((ITERATION+1))
done
echo "Results are in $RESULTS_DIR"
#!/usr/bin/env python3
"""
Plots dut-control -g output
Set up a proper environment to run this script:
* Install miniconda: https://conda.io/en/latest/miniconda.html
* sh Miniconda3-latest-Linux-x86_64.sh
* conda config --set auto_activate_base false
* Set up the conda environment
* conda create --name power python pandas matplotlib scipy natsort tzlocal
* Running the script
* conda activate power
* ./process-power-data /tmp/test_that_latest
"""
import time
import datetime
import dateutil
import pandas as pd
import numpy as np
import matplotlib
import glob
#matplotlib.use('GTK3Cairo') # or 'GTK3Cairo'
import matplotlib.pyplot as plt
from scipy import integrate
from collections import defaultdict
import pprint
from operator import attrgetter, itemgetter
from natsort import natsorted, ns
import json
from tzlocal import get_localzone
import concurrent.futures
#import concurrent.futures.ThreadPoolExecutor
import argparse
import tempfile
import os
import sys
import subprocess
import re
def auto_str(cls):
def __str__(self):
vals = vars(self)
return '%s(\n %s\n)' % (type(self).__name__, ',\n '.join(
'%s=%r' % (key, vals[key]) for key in sorted(vals)))
cls.__str__ = __str__
cls.__repr__ = __str__
return cls
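# Hedged usage sketch for @auto_str: decorating a class makes str()/repr()
# dump its instance attributes sorted by name, e.g.
#   @auto_str
#   class Sample(object):
#       def __init__(self):
#           self.x = 1
#   print(Sample())  # -> Sample(\n  x=1\n)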
@auto_str
class ResultLoop(object):
def __init__(self, result, checkpoint_index, rolling_window):
self.result = result
self.checkpoint_index = checkpoint_index
self.checkpoint = result.checkpoints.loc[checkpoint_index]
self.start_time = self.checkpoint['start_time']
self.end_time = self.checkpoint['end_time']
#print("Checkpoint %s start_time: %r, end_time: %r" % (self.checkpoint['instance'], self.start_time, self.end_time))
self._compute_top()
self._compute_pids()
self._compute_turbostat()
self._compute_task_manager()
self._compute_power(rolling_window)
self.write()
def _slice(self, df):
if df is None:
return None
return df.iloc[(df.index >= self.start_time)
& (df.index < self.end_time)]
def _compute_top(self):
# nothing fancy to do here
self.top = self._slice(self.result.top)
def _compute_pids(self):
if self.result.pids is None:
self.pids = None
return
# We need to pivot the pids to get them in a column
df = self._slice(self.result.pids)
df = df.pivot(columns='id', values='value')
df.fillna(0, inplace=True)
top_cols = []
# Compute top 10 pids
for prefix in ['pid_cpu_', 'pid_mem_']:
cols = [col for col in df if col.startswith(prefix)]
            data = df[cols].apply(
                lambda col: integrate.trapz(col, x=col.index))
largest = data.nlargest(10)
top_cols.extend(largest.index)
self.pids = df[top_cols]
def _compute_task_manager(self):
if self.result.task_manager is None:
self.task_manager = None
return
df = self._slice(self.result.task_manager)
df = df.pivot(columns='id', values='value')
df.fillna(0, inplace=True)
top_cols = []
# Compute top 10 pids
for prefix in ['pid_cpu_', 'pid_audible_', 'pid_videos_playing_']:
cols = [col for col in df if col.startswith(prefix)]
            data = df[cols].apply(
                lambda col: integrate.trapz(col, x=col.index))
largest = data.nlargest(15)
top_cols.extend(largest.index)
#for col in largest.index:
# df['top_%s' % (col)] = df[col]
self.task_manager = df[top_cols]
def _compute_turbostat(self):
if self.result.turbostat is None:
self.turbostat = None
return
df = self._slice(self.result.turbostat)
self.turbostat = df
def _compute_power(self, rolling_window):
if self.result.parent.power is None:
self.power_rolling = None
self.power_summary = None
return
self.power = self._slice(self.result.parent.power)
rolling = self.power.copy()
if rolling.index.size:
rolling.index = rolling.index - rolling.index[0]
else:
rolling.index = pd.TimedeltaIndex([])
# Rolling has a TimeDelta index so the graphs overlap
self.power_rolling = rolling.rolling(rolling_window, min_periods=1)
self.power_mean = self.power.mean()
summary = self.power_summary = {}
summary['floor'] = self.power.quantile(0.1)
#summary['median'] = self.power.median()
#summary['max'] = self.power.max()
summary['mean'] = self.power.mean()
summary['mWh'] = self.power.copy().apply(lambda col: integrate.trapz(
col, x=col.index))
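        # Worked units (hedged): trapz integrates mW over a DatetimeIndex,
        # which numpy treats as nanoseconds, so the division by a one-hour
        # Timedelta below is what converts the result into milliwatt-hours.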
try:
# print(summary['mWh'].notnull().all())
if summary['mWh'].notnull().all():
summary['mWh'] /= pd.Timedelta(1, 'h')
except Exception:
print(summary['mWh'])
print(type(summary['mWh']))
raise
summary['mWh'].rename(index=lambda x: x + 'h', inplace=True)
#mloop.summary['int'] = integral
#mloop.summary['std'] = self.power.std()
#mloop.summary['count'] = self.power.count()
# print(power.summary)
    # NOTE: unused legacy helper; `loops`, `window_seconds`, and `self.data`
    # are not defined in this class, so calling it would raise. Kept for
    # reference only.
    def _compute_power_loops(self):
if loops:
for loop in loops:
start = pd.Timedelta(loop[0], unit='s')
end = pd.Timedelta(loop[1], unit='s')
#print("Start: %s, End: %s" % (start, end))
loop_data = self.data.loc[start:end]
#print("Loop Data for %s:\n%s" % (loop, loop_data))
self.loops[loop] = loop_data
for loop in self.loops:
data = self.loops[loop]
rolling = data.rolling(window_seconds, min_periods=1)
def write(self):
        if self.top is not None:
dest = os.path.join(
self.result.parent.path, 'top-%s-%s.csv' %
(self.result.iteration, self.checkpoint['instance']))
print(f"Writing: {dest}")
self.top.to_csv(dest, encoding='utf-8')
        if self.turbostat is not None:
dest = os.path.join(
self.result.parent.path, 'turbostat-%s-%s.csv' %
(self.result.iteration, self.checkpoint['instance']))
print(f"Writing: {dest}")
self.turbostat.to_csv(dest, encoding='utf-8')
        if self.task_manager is not None:
dest = os.path.join(
self.result.parent.path, 'task_manager-%s-%s.csv' %
(self.result.iteration, self.checkpoint['instance']))
print(f"Writing: {dest}")
self.task_manager.to_csv(dest, encoding='utf-8')
@auto_str
class IterationResult(object):
""" Iteration Results """
def __init__(self, path, parent, args):
self.path = path
self.parent = parent
self._parse_path()
print('test: %r, iteration: %r' % (self.test, self.iteration))
self._load_checkpoints()
print('test_start_time: %r' % (self.test_start_time))
self._load_os_version()
self._load_fw_version()
self._update_subplot()
self._load_interrupts()
self._load_task_manager()
self._load_turbostat()
self._load_top()
self._generate_loops(args)
def _parse_path(self):
_, result_part = os.path.split(self.path)
match = re.match(r'^results-(\d+)-(.*?)$', result_part)
if match:
self.iteration = int(match.group(1))
self.test = match.group(2)
self.test_dir = os.path.join(self.path, self.test)
else:
self.iteration = 0
self.test = None
self.test_dir = self.path
print(f"Iteration: {self.iteration}, Test: {self.test}")
def _load_checkpoints(self):
paths = [
os.path.join(self.test_dir, 'results', 'checkpoint_log.json'),
#os.path.join(self.base, self.prefix, self.test, 'results', 'checkpoint_log.json'),
]
self.test_start_time = None
self.test_end_time = None
self.checkpoints = None
for path in paths:
#print(path)
file = CheckpointFile(path)
if file.data is None:
continue
self.test_start_time = file.start_time
self.test_end_time = file.end_time
self.checkpoints = file.data
if self.checkpoints is None or self.checkpoints.empty:
print('Failed to load checkpoints from: %s' % (paths))
def _load_os_version(self):
paths = [
[self.test_dir, 'keyval'],
#[self.base, self.prefix, self.test, 'keyval'],
]
self.os_version = 'Unknown'
for path in paths:
path = os.path.join(*path)
file = KeyValFile(path)
if file.data:
self.os_version = file.data['CHROMEOS_RELEASE_VERSION']
break
def _load_fw_version(self):
paths = [
[self.path, 'sysinfo', 'crossystem'],
# [self.base, self.prefix, 'sysinfo', 'crossystem'],
]
self.fw_version = 'Unknown'
for path in paths:
path = os.path.join(*path)
file = KeyValFile(path)
if file.data:
self.fw_version = file.data['fwid']
break
def _update_subplot(self):
keys = [
'OS: %s' % (self.os_version),
'FW: %s' % (self.fw_version),
'Test: %s' % (self.test),
]
# if self.parent.run_id:
# keys.append('Run: %s' % (self.parent.run_id))
self.subplot = ', '.join(keys)
def _load_interrupts(self):
paths = [
[self.test_dir, 'sysinfo', 'iteration.1'],
#[self.base, self.prefix, self.test, 'sysinfo', 'iteration.1'],
]
self.interrupts = None
for path in paths:
interrupt_file = InterruptsFile(
before=os.path.join(*path, 'interrupts.before'),
after=os.path.join(*path, 'interrupts.after'))
if interrupt_file.data is None:
continue
self.interrupts = interrupt_file.data
def _load_top(self):
locations = [
#{
# 'path': os.path.join(self.base, '%s-top.txt' % (self.prefix)),
# 'start_time': self.test_that_start_time
#},
{
'path': os.path.join(self.test_dir,
'profiling/iteration.1/top'),
'start_time': self.test_start_time
},
#{
# 'path':
# os.path.join(self.base, self.prefix, self.test,
# 'profiling/iteration.1/top'),
# 'start_time':
# self.test_start_time
#},
]
self.top = None
self.pids = None
for location in locations:
if not os.path.isfile(location['path']):
continue
file = TopFile(
location['path'],
estimated_start_timestamp=location['start_time']
# TODO(rrangel): Use the test start and end times
#skip_head=skip_head,
#skip_tail=skip_tail,
)
if not file.data.empty:
self.top = file.data
self.pids = file.pid_data
break
if self.top is None or self.top.empty:
print('Failed to load top from: %s' %
([loc['path'] for loc in locations]))
def _load_task_manager(self):
paths = [
os.path.join(self.test_dir, 'results/task-monitor.json'),
#os.path.join(self.base, self.prefix, self.test, 'results/task-monitor.json')
]
self.task_manager = None
for path in paths:
if not os.path.isfile(path):
continue
file = TaskManager(path)
if not file.data.empty:
self.task_manager = file.data
break
if self.task_manager is None or self.task_manager.empty:
print('Failed to load task_manager from: %s' % (paths))
def _load_turbostat(self):
locations = [
#{
# 'path': os.path.join(self.base, '%s-turbostat.txt' % (self.prefix)),
# 'start_time': self.test_that_start_time
#},
{
'path':
os.path.join(self.test_dir, 'profiling/iteration.1/turbostat'),
'start_time':
self.test_start_time
},
#{
# 'path':
# os.path.join(self.base, self.prefix, self.test,
# 'profiling/iteration.1/turbostat'),
# 'start_time':
# self.test_start_time
#},
]
self.turbostat = None
for location in locations:
if not os.path.isfile(location['path']):
continue
file = Turbostat(
location['path'],
estimated_start_timestamp=location['start_time']
# TODO(rrangel): Use the test start and end times
#skip_head=skip_head,
#skip_tail=skip_tail,
)
if not file.data.empty:
self.turbostat = file.data
break
if self.turbostat is None or self.turbostat.empty:
print('Failed to load turbostat from: %s' %
([loc['path'] for loc in locations]))
def _generate_loops(self, args):
self.loops = loops = []
if self.checkpoints is None:
return
print(f"{self.checkpoints}")
if args.loops:
loop_checkpoints = self.checkpoints[
self.checkpoints['checkpoint'].isin(args.loops)
| self.checkpoints['instance'].isin(args.loops)]
else:
loop_checkpoints = self.checkpoints[self.checkpoints['level'] >= 1]
for index, row in loop_checkpoints.iterrows():
loop = ResultLoop(self, index, args.window)
loops.append(loop)
@auto_str
class Iteration(object):
""" Contains all the Iteration Results """
def __init__(self, folder_name, args):
self.path = folder_name
self._parse_file_name()
print(f'path: {self.path}')
self._load_test_that_start_time()
print('test_that_start_time: %r (%s)' %
(self.test_that_start_time, type(self.test_that_start_time)))
self._load_power(args)
self._load_results(args)
    def _parse_file_name(self):
        original_path = self.path
        full_path = os.path.realpath(original_path)
        if not os.path.isdir(full_path):
            raise ValueError(f"{full_path} is not a directory")
        if not os.path.isfile(os.path.join(full_path, "results.json")):
            raise ValueError(f"{full_path} is missing results.json")
        self.path = full_path
        # print_power_graphs() reads .name and .iteration off this object, so
        # default them here to avoid an AttributeError.
        self.name = original_path
        self.iteration = None
def _load_power(self, args):
path = f'{self.path}.tsv'
# if args.skip_head or args.skip_head:
# skip_head = args.skip_head
# skip_tail = args.skip_tail
# elif self.test_start_time and self.test_that_start_time:
# delta = self.test_start_time.timestamp() - self.start_time.timestamp()
# skip_head = delta
# delta = self.test_end_time.timestamp() - self.test_start_time.timestamp()
# skip_tail = skip_head + delta
# else:
# skip_head = None
# skip_tail = None
#self.data = self.data.shift(-delta.total_seconds(), freq='s')
self.power = None
if not os.path.isfile(path):
return
file = MeasurementFile(path,
include=args.include,
exclude=args.exclude,
start_timestamp=self.test_that_start_time)
self.power = file.data
def _load_test_that_start_time(self):
# grunt-2018-11-16-power_Idle.q4Jd/grunt-power_Idle-0/debug/test_that.INFO
path = [self.path, 'debug', 'test_that.INFO']
file = LogFile(os.path.join(*path))
self.test_that_start_time = file.start_time
def _load_results(self, args):
"""
if match:
self.iteration = int(match.group(2))
self.test = match.group(3)
self.prefix = f"{self.test} - {self.iteration}"
else:
self.iteration = None
self.test = None
self.prefix = ""
"""
dirs = glob.glob(os.path.join(self.path, 'results-*-*'))
dirs = sorted(dirs)
print("result dirs: %s" % (dirs))
results = []
for result_dir in dirs:
results.append(IterationResult(result_dir, self, args))
#TODO: Remove this after testing
#if len(results) > 10:
# break
self.results = results
@auto_str
class MeasurementValue:
def __init__(self, name, title, seconds_idx, value_idx):
self.name = name
self.title = title
self.seconds_idx = seconds_idx
self.value_idx = value_idx
@auto_str
class PowerLoopMeasurements(object):
pass
@auto_str
class TurboData(object):
pass
@auto_str
class TopData(object):
pass
@auto_str
class PidData(object):
pass
class LogFile(object):
def __init__(self, file_name):
self.name = file_name
if os.path.isfile(file_name):
self.data = self._load_data(file_name)
self.start_time = self.data['start_time']
else:
print('Failed to find %s' % (file_name))
self.data = {}
self.start_time = None
def _load_data(self, file_name):
data = {}
pattern = re.compile(
r'^(?P<month>\d+)/(?P<day>\d+) (?P<hour>\d+):(?P<min>\d+):(?P<seconds>\d+)\.(?P<ms>\d+) (?P<level>\w+)'
)
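        # Matches test_that.INFO lines shaped like (illustrative, not a real
        # capture): "11/16 10:23:45.678 INFO| autoserv ..."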
with open(file_name, 'rt') as file:
for line in file:
match = pattern.match(line)
if not match:
print('Line did not match: %s' % (line))
continue
# The log file doesn't contain the current year, so we guess
current_year = datetime.datetime.today().year
date = datetime.datetime(current_year,
int(match.group('month')),
int(match.group('day')),
int(match.group('hour')),
int(match.group('min')),
int(match.group('seconds')),
int(match.group('ms')) * 1000,
get_localzone())
if 'start_time' not in data:
data['start_time'] = pd.Timestamp(date)
break
return data
class CheckpointFile(object):
def __init__(self, file_name):
self.name = file_name
if os.path.isfile(file_name):
self.data, self.start_time, self.end_time = self._load_data(
file_name)
else:
self.data = None
self.start_time = None
self.end_time = None
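    # Expected checkpoint_log.json shape (hedged sketch, inferred from the
    # parser below): a dict of checkpoint name -> list of [start, end] epoch
    # second pairs, with '_'-prefixed keys ignored, e.g.
    #   {"email": [[1542390000.0, 1542390300.0]],
    #    "web": [[1542390300.0, 1542390600.0], [1542390600.0, 1542390900.0]]}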
def _load_data(self, file_name):
with open(file_name, 'rt') as file:
data = json.load(file)
start_times = []
end_times = []
index = []
for checkpoint in data:
if checkpoint.startswith('_'):
continue
intervals = data[checkpoint]
for interval in intervals:
if len(interval) != 2:
raise ValueError('Interval must have 2 elements: %s' %
(interval))
start_time = datetime.datetime.fromtimestamp(
interval[0], datetime.timezone.utc)
end_time = datetime.datetime.fromtimestamp(
interval[1], datetime.timezone.utc)
start_times.append(start_time)
end_times.append(end_time)
index.append(checkpoint)
data = pd.DataFrame(
{
'start_time': start_times,
'end_time': end_times,
'checkpoint': index
},
columns=['checkpoint', 'start_time', 'end_time'])
start_time = data['start_time'].min()
end_time = data['end_time'].max()
        # Add a synthetic 'all' checkpoint spanning the whole run to make the
        # loop logic nicer
data.loc[-1] = ['all', start_time, end_time]
data.sort_values(by=['start_time', 'end_time'],
ascending=[True, False],
inplace=True)
data.reset_index(inplace=True, drop=True)
data['start_delta'] = data['start_time'] - start_time
data['end_delta'] = data['end_time'] - start_time
self._compute_instance(data)
self._compute_levels(data)
#print(data)
#sys.exit(3)
return data, start_time, end_time
def _compute_instance(self, data):
unique = defaultdict(int)
for index, row in data.iterrows():
unique[row['checkpoint']] += 1
instances = []
seen = defaultdict(int)
for index, row in data.iterrows():
if unique[row['checkpoint']] > 1:
instances.append('%s.%d' %
(row['checkpoint'], seen[row['checkpoint']]))
seen[row['checkpoint']] += 1
else:
instances.append(row['checkpoint'])
data['instance'] = instances
def _compute_levels(self, data):
stack = []
levels = []
for index, row in data.iterrows():
#print("index %d\nstack: %s\n\nrow: %s\n" % (index, stack, row))
while len(stack):
parent = stack[-1]
if parent['start_time'] <= row['start_time'] < parent[
'end_time']:
#print("%s is a child of parent %s" % (row['checkpoint'], parent['checkpoint']))
stack.append(row)
break
else:
#print("%s is NOT a child of parent %s" % (row['checkpoint'], parent['checkpoint']))
stack.pop()
if not stack:
stack.append(row)
levels.append(len(stack) - 1)
#print("levels: %s\n----\n" % (levels))
data['level'] = levels
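    # Worked example (hedged): intervals all=[0,900], email=[0,300],
    # web=[300,900] sort to (all, email, web); the stack nests email and web
    # under all, producing levels [0, 1, 1]. A name occurring twice would be
    # renamed by _compute_instance() to instances like 'web.0' and 'web.1'.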
class KeyValFile(object):
def __init__(self, file_name):
self.name = file_name
if os.path.isfile(file_name):
self.data = self._load_data(file_name)
else:
self.data = {}
def _load_data(self, file_name):
data = {}
pattern = re.compile(r'([\w_]+)\s*=\s*(.*?)\s*(?:#.*)?$')
with open(file_name, 'rt') as file:
for line in file:
match = pattern.match(line)
if not match:
continue
data[match.group(1)] = match.group(2)
return data
class InterruptsFile(object):
def __init__(self, before, after):
if os.path.isfile(before) and os.path.isfile(after):
before = self._load_data(before)
after = self._load_data(after)
self.data = self._subtract(before, after)
else:
self.data = None
def _subtract(self, before, after):
s = after - before
return s
def _load_data(self, file_name):
data = {}
pattern = re.compile(r'^\s*(\w+):\s+(\d+)\s+(\d+)\s(.*)$')
with open(file_name, 'rt') as file:
for line in file:
match = pattern.match(line)
if not match:
continue
data[match.group(1)] = int(match.group(2)) + int(
match.group(3))
s = pd.Series(data)
return s
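    # Illustrative /proc/interrupts line this regex accepts (shape assumed,
    # not a real capture); note only the first two CPU columns are summed,
    # i.e. this assumes a two-CPU DUT:
    #   24:   1234567   2345678   IO-APIC   24-edge   ahci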
class TopFile(object):
def __init__(self, file_name, estimated_start_timestamp=None):
self.name = file_name
raw, start_time = self._load_data(file_name)
if start_time is None:
# We don't have a way of getting the actual timezone, but it currently
# matches my workstation so we will use that.
self.dut_tz = get_localzone()
# top data is using DUT timezone
start_time = estimated_start_timestamp.tz_convert(self.dut_tz)
print('Localized start_time: %s' % (start_time))
self.data, self.pid_data = self._build_data_frame(raw, start_time)
def _load_data(self, file_name):
all_data = []
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}')
start_timestamp = None
with open(file_name, 'rt') as file:
data = None
for line in file:
if start_timestamp is None and date_pattern.match(line):
start_timestamp = dateutil.parser.parse(line)
#print('Found a timestamp for top!! %r %r' % (start_timestamp, line))
continue
# Let's pretend we are awk
parts = re.split(r'\s+', line.strip())
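                # Illustrative `top -b` lines the branches below key off of
                # (shapes inferred from the field indexing, not real output):
                #   top - 12:34:56 up 1:23, 0 users, load average: 0.52, 0.41, 0.30
                #   Threads: 812 total, 1 running, 811 sleeping, 0 stopped, 0 zombie
                #   %Cpu(s): 1.2 us, 0.8 sy, 0.0 ni, 97.5 id, 0.3 wa, 0.1 hi, 0.1 si, 0.0 st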
try:
if parts[0] == 'top':
if data:
#print("%s" % (data))
all_data.append(data)
data = None
# Just a hack to speed things up
#if len(all_data) > 100:
# break
if len(parts) > 11:
data = TopData()
data.pids = {}
data.time = datetime.time.fromisoformat(parts[2])
data.load_average_1 = float(parts[-3].rstrip(','))
data.load_average_5 = float(parts[-2].rstrip(','))
data.load_average_15 = float(parts[-1].rstrip(','))
elif not data:
continue # Skip partial lines
elif len(parts) > 5 and parts[0] == 'Threads:':
data.threads_total = int(parts[1])
data.threads_running = int(parts[3])
data.threads_sleeping = int(parts[5])
elif len(parts) > 15 and parts[0] == '%Cpu(s):':
data.cpu_user = float(parts[1])
data.cpu_sys = float(parts[3])
data.cpu_nice = float(parts[5])
data.cpu_idle = float(parts[7])
data.cpu_wait = float(parts[9])
data.cpu_hw_int = float(parts[11])
data.cpu_sw_int = float(parts[13])
data.cpu_stolen = float(parts[15])
                    elif (len(parts) > 9 and parts[0] == 'MiB'
                          and parts[1] == 'Mem'):
                        data.mem_total = float(parts[3])
                        data.mem_free = float(parts[5])
                        data.mem_used = float(parts[7])
                        data.mem_cache = float(parts[9])
                    elif (len(parts) > 8 and parts[0] == 'MiB'
                          and parts[1] == 'Swap:'):
                        data.mem_avail = float(parts[8])
elif parts[0].isdigit() and len(parts) > 11:
pid = int(parts[0])
pid_data = data.pids[pid] = PidData()
pid_data.pid = pid
pid_data.cpu = float(parts[8])
pid_data.mem = float(parts[9])
pid_data.command = parts[11:]
                except Exception:
print(parts)
raise
if data and data.pids:
all_data.append(data)
return all_data, start_timestamp
def _build_data_frame(self, raw, init_timestamp):
cols = [
#'time',
'load_average_1',
'load_average_5',
'load_average_15',
'threads_total',
'threads_running',
'threads_sleeping',
'cpu_user',
'cpu_sys',
'cpu_nice',
#'cpu_idle',
'cpu_wait',
'cpu_hw_int',
'cpu_sw_int',
'cpu_stolen',
'mem_total',
'mem_free',
'mem_used',
'mem_cache',
'mem_avail',
]
pid_cpu_title = {}
pid_mem_title = {}
pid_index = []
index = []
rows = []
pid_rows = []
# Because the data doesn't have a date we take a guess
current_date = init_timestamp.date()
start_timestamp = None
prev_seconds = None
total_delta = pd.Timedelta(0, unit='s')
for data in raw:
current_time = data.time
            # Top has some very funky time reporting: the hour will shift
            # around. For this reason we can't trust the hour, so we just use
            # the minutes and seconds to compute an offset between readings.
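            # Worked example (hedged): a previous reading at 59:50 (3590 s)
            # followed by 00:10 (10 s) gives delta = -3580; adding 3600 below
            # recovers the true 20 s gap across the rollover.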
if start_timestamp is None:
# We assume the hour of the initial reading is correct.
start_timestamp = pd.Timestamp.combine(
current_date,
current_time.replace(tzinfo=init_timestamp.tzinfo))
prev_seconds = current_time.minute * 60 + current_time.second
current_seconds = current_time.minute * 60 + current_time.second
delta = current_seconds - prev_seconds
if delta < 0:
# Add an hour because we rolled over
delta += 60 * 60
total_delta += pd.Timedelta(delta, unit='s')
current_timestamp = start_timestamp + total_delta
prev_seconds = current_seconds
index.append(current_timestamp)
row = [getattr(data, col) for col in cols]
rows.append(row)
# Generate a flat list that we can then pivot
for pid in data.pids:
pid_data = data.pids[pid]
title = pid_cpu_title.get(pid, None)
if title is None:
title = 'pid_cpu_%d - %s' % (pid, ' '.join(
pid_data.command))
pid_cpu_title[pid] = title
pid_rows.append([title, pid_data.cpu])
pid_index.append(current_timestamp)
#title = pid_mem_title.get(pid, None)
#if not title:
# pid_mem_title[pid] = 'pid_mem_%d - %s' % (pid, ' '.join(pid_data.command))
#pid_rows.append([current_timestamp, title, pid_data.mem])
top_df = pd.DataFrame(data=rows, columns=cols, index=index)
pid_df = pd.DataFrame(data=pid_rows,
columns=['id', 'value'],
index=pid_index)
return top_df, pid_df
class TaskManager(object):
def __init__(self, file_name):
self.name = file_name
raw = self._load_data(file_name)
self.data = self._build_data_frame(raw)
if self.data.empty:
print('%s was truncated to empty' % (file_name))
return
def _load_data(self, file_name):
data = []
        # We somehow get duplicate timestamps, so we need to ignore them
dups = set()
# print(f"task_manager: {file_name}")
with open(file_name, 'rt') as file:
for line in file:
# The json file is missing the opening and closing [], so we parse
# it manually
line = line.rstrip(",\n")
try:
entry = json.loads(line)
except json.decoder.JSONDecodeError:
print(f"line: {line}")
raise
if entry['timestamp'] in dups:
#print("Found duplicate, skipping: %s" % (line))
continue
dups.add(entry['timestamp'])
data.append(entry)
return data
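    # Expected task-monitor.json line shape (hedged sketch, inferred from the
    # parser below; one JSON object per line, trailing commas stripped):
    #   {"timestamp": 1542390000000,
    #    "tabInfo": [{"tabId": 5, "audio_played": true}],
    #    "processes": [{"pid": 123, "cpu": 12.5,
    #                   "tasks": [{"title": "Tab: Gmail", "tabId": 5}]}]}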
def _build_data_frame(self, raw):
cols = [
'timestamp',
'id',
'value',
]
metrics = ['cpu', 'audio_played', 'muted', 'videos_playing']
# Let's fix the pid naming
rename_map = {
'audio_played': 'audible'
}
# Reuse the strings to reduce memory usage
title_cache = defaultdict(dict)
index = []
rows = []
for measurement in raw:
timestamp = pd.Timestamp(measurement['timestamp'],
unit='ms',
tz=datetime.timezone.utc)
tab_info = {}
for info in measurement['tabInfo']:
tab_info[info['tabId']] = info
for process in measurement['processes']:
tasks = process['tasks']
# Default 0 if we don't have the info
cpu = process.get('cpu', 0) / len(tasks)
for task in tasks:
tab = None
# We will split the cpu usage evenly between all tasks in a process
if 'tabId' in task:
key = (process['pid'], task['tabId'], task['title'])
row_ids = title_cache[key]
tab = tab_info[task['tabId']]
if not row_ids:
for metric in metrics:
row_ids[metric] = 'pid_%s_%d.%d - %s' % (
(rename_map.get(metric, metric), ) + key)
else:
key = (process['pid'], task['title'])
row_ids = title_cache[key]
if not row_ids:
for metric in metrics:
row_ids[metric] = 'pid_%s_%d - %s' % (
(rename_map.get(metric, metric), ) + key)
for metric in metrics:
if metric == 'cpu':
rows.append([timestamp, row_ids[metric], cpu])
elif tab and metric in tab:
value = tab[metric]
if type(value) is bool:
rows.append([
timestamp, row_ids[metric],
1 if value else 0
])
else:
rows.append(
[timestamp, row_ids[metric], tab[metric]])
elif metric in task:
rows.append(
[timestamp, row_ids[metric], task[metric]])
else:
rows.append([timestamp, row_ids[metric], 0])
df = pd.DataFrame(data=rows, columns=cols)
# Drop ids with all zeros
totals_by_id = df.groupby(['id'])['value'].sum()
zero_ids = totals_by_id[totals_by_id == 0]
rows_to_drop = df[df['id'].isin(zero_ids.index)]
df.drop(index=rows_to_drop.index, inplace=True)
df.set_index('timestamp', inplace=True)
return df
class Turbostat(object):
def __init__(self, file_name, estimated_start_timestamp=None):
self.name = file_name
raw, start_timestamp = self._load_data(file_name)
if start_timestamp is None:
start_timestamp = estimated_start_timestamp
self.data = self._build_data_frame(raw, start_timestamp)
if self.data.empty:
print('%s was truncated to empty' % (file_name))
return
def _load_data(self, file_name):
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}')
all_data = []
start_timestamp = None
with open(file_name, 'rt') as file:
data = None
for line in file:
if start_timestamp is None and date_pattern.match(line):
start_timestamp = dateutil.parser.parse(line)
#print('Found a timestamp for turbostat!! %r %r' % (start_timestamp,
# line))
continue
parts = re.split(r'\s+', line.strip())
try:
if parts[0] == '-' and parts[1] == '-':
# print(parts)
if len(parts) < 11:
raise ValueError('Invalid row %s' % (parts))
data = TurboData()
data.avg_mhz = int(parts[2])
data.c_pct_0 = float(parts[3])
data.c_mhz_0 = int(parts[4])
data.tsc_mhz = int(parts[5])
data.irq = int(parts[6])
data.c_mhz_1 = int(parts[7])
data.c_mhz_2 = int(parts[8])
data.c_pct_1 = float(parts[9])
data.c_pct_2 = float(parts[10])
all_data.append(data)
elif not data:
continue # Skip partial lines
                except Exception:
print(parts)
raise
return all_data, start_timestamp
def _build_data_frame(self, raw, start_timestamp):
cols = [
'avg_mhz',
'c_pct_0',
'c_mhz_0',
'tsc_mhz',
'irq',
'c_mhz_1',
'c_mhz_2',
'c_pct_1',
'c_pct_2',
]
        # We don't have actual timestamps with each reading so we can't account
        # for jitter; assume turbostat's default 5 second interval.
        td = pd.Timedelta(5, unit='s')
        # pd.date_range() replaces the removed DatetimeIndex(start=...) form.
        index = pd.date_range(start=start_timestamp + td,
                              freq=td,
                              periods=len(raw))
rows = []
for data in raw:
row = [getattr(data, col) for col in cols]
rows.append(row)
df = pd.DataFrame(data=rows, columns=cols, index=index)
return df
@auto_str
class MeasurementFile(object):
def __init__(self,
file_name,
exclude=None,
include=None,
start_timestamp=None):
self.name = file_name
self.values = {}
with open(file_name, 'rt') as file:
#print("Starting processing on %s" % (self.name))
self._header, self.start_timestamp = self._read_header(file)
self.values = self._generate_values(exclude, include)
if self.start_timestamp is None:
self.start_timestamp = start_timestamp
self.data = self._load_file(file)
self._apply_column_filters(exclude, include)
# print("%s" % (self.data))
# print('Using timetamp: %r' % (self.start_timestamp))
def _msgpack_file(self):
return os.path.splitext(self.name)[0] + '.msgpack'
def _read_header(self, file):
start_timestamp = None
header = file.readline()
        if header.startswith('# date: '):
            # Slice off the prefix; str.lstrip() strips a character set, not a
            # prefix, so it could eat leading characters of the date itself.
            start_timestamp = pd.Timestamp(header[len('# date: '):].strip())
            print('Found power start timestamp: %s' % (start_timestamp))
header = file.readline()
header = header.lstrip('# ').strip()
header = header.split(' ')
return header, start_timestamp
def _generate_values(self, exclude, include):
values = {}
for seconds_idx in range(0, len(self._header) - 1, 2):
value_idx = seconds_idx + 1
value = self._header[value_idx]
title = value.replace('_mw', '')
values[value] = MeasurementValue(name=value,
title=title,
seconds_idx=seconds_idx,
value_idx=value_idx)
return values
def _load_file(self, file):
if os.path.isfile(self._msgpack_file()):
df = pd.read_msgpack(self._msgpack_file())
# See https://github.com/pandas-dev/pandas/issues/25148
df.index = df.index.tz_localize('UTC')
return df
idxs = [0] + [col.value_idx for col in self.values.values()]
df = pd.read_csv(
file,
delim_whitespace=True,
dtype=np.float64,
comment='#',
names=self._header,
header=None,
error_bad_lines=False,
index_col=0,
usecols=idxs,
#mangle_dupe_cols=False
)
# Drop last row because it might be incomplete
df.drop(df.tail(1).index, inplace=True)
tds = pd.to_timedelta(df.index, unit='s')
# See bug: https://github.com/pandas-dev/pandas/issues/24094
index = []
for td in tds:
index.append(self.start_timestamp + td)
df.index = pd.DatetimeIndex(index)
df.to_msgpack(self._msgpack_file())
return df
def _apply_column_filters(self, exclude, include):
columns = []
for column in self.data:
if exclude and column in exclude:
continue
            if include and column not in include:
continue
columns.append(column)
self.data = self.data[columns]
def parse_args():
parser = argparse.ArgumentParser(description='Plot power_loadTest power.')
parser.add_argument(
'results',
nargs='+',
type=str, #argparse.FileType('rt'),
help='test_that_results_XXXXX folder ')
parser.add_argument('--exclude',
type=lambda s: s.split(','),
default=[],
help='measurements to exclude')
parser.add_argument('--include',
type=lambda s: s.split(','),
default=[],
help='Only these measurements will be included')
parser.add_argument('--range',
default='',
help='Seconds of data to show, i.e. `100:`, `100:600`')
parser.add_argument('--groupby',
default=10,
type=int,
help='Seconds of data to group by')
parser.add_argument('--window',
default='10s',
type=str,
help='Rolling window. e.g., 1s, 1m, 1h')
    parser.add_argument('--legend',
                        default=False,
                        action='store_true',
                        help='Show the legend')
parser.add_argument(
'--graphs',
type=lambda s: s.split(','),
#action='append',
default=[],
        help='top, power-raw, power-mean, power-max, power-median, power-min')
parser.add_argument(
'--offset',
action='append',
type=lambda s: s.split('='),
default=[],
help='offset the data for a file. e.g., --offset=file.tsv=-4')
parser.add_argument('--loops',
type=lambda s: s.split(','),
default=[],
help='loop names, or `all`. e.g., email,web.2')
return parser.parse_args()
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
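# Hedged usage sketch: percentile() appears unused below, but the named
# closure is shaped for pandas aggregation, e.g.
#   df.groupby('subplot').agg(['mean', percentile(90)])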
# svg_file_name = os.path.splitext(args.measurements)[0]+'.svg'
# prog = prog % (svg_file_name)
def timeTicks(x, pos):
d = datetime.timedelta(seconds=x / 10)
return str(d)
time_ticks_formatter = matplotlib.ticker.FuncFormatter(timeTicks)
percent_formatter = matplotlib.ticker.FuncFormatter(
    lambda y, _: '{:.1%}'.format(y / 100))
simple_formatter = matplotlib.ticker.FuncFormatter(
    lambda y, _: '{:.1f}'.format(y))
#seconds_formatter = matplotlib.ticker.FuncFormatter(
# lambda x, _: str(x) + " " + str(_) + str(type(x)) + str(type(_)))
def seconds_formatter(df):
def fmt(x, _):
pos = int(x)
if pos >= len(df.index):
return '?'
current_time = df.index[pos]
start_time = df.index[0]
val = current_time - start_time
val = str(val).replace('0 days', '')
if _ is None:
return '%s (%d) (%s)' % (val, current_time.timestamp(),
current_time.isoformat())
else:
return val
return matplotlib.ticker.FuncFormatter(fmt)
ALL_AVAILABLE_COLORS = ['c', 'r', 'g', 'b', '#AAAAAA']
POSSIBLE_COLORS = list(ALL_AVAILABLE_COLORS)
COLOR_ASSIGNMENT_MAP = {}
def draw_spans(axis, index, checkpoints):
if checkpoints is None:
return
#print(checkpoints)
#min_start_time = checkpoints['start_time'].min()
# Need to find the start and end ticks for each checkpoint
for idx, row in checkpoints.iterrows():
#print("Checkpoint: %s" % (row))
if row.level <= 1:
continue
checkpoint = row['checkpoint']
if checkpoint in COLOR_ASSIGNMENT_MAP:
color = COLOR_ASSIGNMENT_MAP[checkpoint]
elif POSSIBLE_COLORS:
color = POSSIBLE_COLORS.pop()
COLOR_ASSIGNMENT_MAP[checkpoint] = color
        else:
            print("No colors remaining for %s" % (checkpoint))
            # Without a color assignment we can't draw this span; skip it
            # instead of reusing a stale `color` from a previous iteration.
            continue
#start_time = row['start_time'] - min_start_time
#end_time = row['end_time'] - min_start_time
#print("Offsets: %s %s" % (start_time, end_time))
try:
# backfill: use NEXT index value if no exact match
start_index = index.get_loc(row['start_time'], method='backfill')
# pad: find the PREVIOUS index value if no exact match.
end_index = index.get_loc(row['end_time'], method='pad')
except KeyError:
# If we fail to find any matching indexes then skip the checkpoint
continue
#print('indexes (%s): %s %s' % (axis.get_title(), start_index, end_index))
#print("color: %s" % (str(color)))
axis.axvspan(start_index, end_index, facecolor=color, alpha=0.1)
depth = 0
def print_top_graphs(iterations, args):
PLOTS = [
#{
# 'title': 'Load Average',
# 'prefix': 'load_average_',
# 'kind': 'line',
# 'src': 'top',
#},
#{
# 'title': 'Memory',
# 'prefix': 'mem_',
# 'ylabel': 'MB'
#},
{
'title': 'CPU',
'prefix': 'cpu_',
'yformatter': percent_formatter,
'kind': 'area',
'src': 'top'
},
# {
# 'title': 'Threads',
# 'prefix': 'threads_',
# },
{
'title': 'Processes',
'prefix': 'pid_cpu_',
'yformatter': percent_formatter,
'kind': 'area',
'src': 'pids'
},
{
'title': 'C-State',
'prefix': 'c_pct_',
'yformatter': percent_formatter,
'kind': 'area',
'src': 'turbostat'
},
{
'title': 'Interrupts',
'prefix': 'irq',
#'yformatter': percent_formatter,
'kind': 'line',
'src': 'turbostat'
},
{
'title': 'Power',
'cols': ['ppdut5_mw'],
#'yformatter': percent_formatter,
'kind': 'line',
'src': 'power_mean'
},
{
'title': 'Chrome Processes',
'prefix': 'pid_cpu_',
#'yformatter': percent_formatter,
'kind': 'area',
'src': 'task_manager'
},
{
'title': 'Audible',
'prefix': 'pid_audible_',
#'yformatter': percent_formatter,
'kind': 'area',
'src': 'task_manager',
'optional': True
},
{
'title': 'Videos Playing',
'prefix': 'pid_videos_playing_',
#'yformatter': percent_formatter,
'kind': 'area',
'src': 'task_manager',
'optional': True
}
]
for iteration in iterations:
for result in iteration.results:
for loop in result.loops:
checkpoint = loop.checkpoint['instance']
matching_plots = []
for plot in PLOTS:
                    src = getattr(loop, plot['src'], None)
if src is None or src.empty:
# print(f"Didn't find a source for {plot['src']} on loop {loop.checkpoint['instance']}")
continue
matching_plots.append(plot)
if not matching_plots:
continue
fig, axes = plt.subplots(
nrows=len(matching_plots),
ncols=1,
figsize=(18, 13),
squeeze=False,
#sharex=True,
subplotpars=matplotlib.figure.SubplotParams(left=0.08,
right=0.72,
bottom=0.05,
top=0.9,
wspace=0.1,
hspace=0.2))
title = '%s, Iteration: %d, Loop: %s' % (
result.subplot, result.iteration, checkpoint)
fig.suptitle(title)
#fig.set_tight_layout(True)
for idx, plot in enumerate(matching_plots):
src_data = getattr(loop, plot['src'])
if src_data.empty:
continue
axis = axes[idx, 0]
axis.set_title(plot['title'])
if 'prefix' in plot:
cols = [
col for col in src_data
if col.startswith(plot['prefix'])
]
if not cols:
if 'optional' in plot and plot['optional']:
continue
raise ValueError(
'Failed to find columns with prefix %s in: %s'
% (plot['prefix'], src_data.columns))
else:
cols = [col for col in src_data if col in plot['cols']]
if not cols:
raise ValueError(
'Failed to find columns %s in: %s' %
(plot['cols'], src_data.columns))
yformatter = simple_formatter
axis.grid(True)
axis.xaxis.set_major_formatter(seconds_formatter(src_data))
if 'yformatter' in plot:
axis.yaxis.set_major_formatter(plot['yformatter'])
yformatter = plot['yformatter']
if 'xlabel' in plot:
axis.xaxis.set_label_text(plot['xlabel'])
if 'ylabel' in plot:
axis.yaxis.set_label_text(plot['ylabel'])
data = src_data[cols]
means = data.mean()
labels = [
'{} ({})'.format((col.replace(plot['prefix'], '')
if 'prefix' in plot else col)[:50],
yformatter.format_data(value))
                        for col, value in means.items()
]
#print(labels)
#data = data.rename(columns=labels)
data.plot(ax=axis,
legend=True,
kind=plot.get('kind', 'line'),
use_index=False)
#print(result.checkpoints)
draw_spans(axis, data.index, result.checkpoints)
box = axis.get_position()
#axis.set_position([box.x0, box.y0, box.width * 0.7, box.height])
axis.legend(labels,
bbox_to_anchor=(1, 1),
loc='upper left',
prop={'size': 9})
#fig.tight_layout()
dest = os.path.join(
iteration.path,
'top-%d-%s.png' % (result.iteration, checkpoint))
print(f"Saving {dest}")
fig.savefig(dest)
# dest = os.path.join(iteration.path, 'top-%d-%s.csv' % (result.iteration, checkpoint))
# print(f"Saving {dest}")
# src_data.to_csv(
# dest,
# encoding='utf-8'
# )
def print_power_graphs(iterations, args):
loops_by_column = defaultdict(list)
for iteration in iterations:
for result in iteration.results:
for loop in result.loops:
if loop.power.index.size:
for column in loop.power:
loops_by_column[column].append(loop)
if not loops_by_column:
        print('No power data found')
return
graphs = defaultdict(lambda: defaultdict(list))
for column_name in sorted(loops_by_column.keys()):
loops = loops_by_column[column_name]
for loop in loops:
result = loop.result
iteration = result.parent
if iteration.iteration is not None:
name = 'Iteration: %d.%d' % (iteration.iteration,
result.iteration)
else:
name = os.path.splitext(os.path.basename(iteration.name))[0]
rolling = loop.power_rolling[column_name]
mWh = loop.power_summary['mWh'][column_name + 'h']
floor = loop.power_summary['floor'][column_name]
loop_name = '%s ' % (loop.checkpoint['instance'], )
name_with_mwh = '%s (%d mWh, floor: %d mW)' % (name, int(mWh),
int(floor))
if not args.graphs or 'power-raw' in args.graphs:
graphs[loop_name + column_name][result.subplot].append(
loop.power[column_name].rename(name_with_mwh))
if not args.graphs or 'power-mean' in args.graphs:
graphs[loop_name + column_name +
' mean'][result.subplot].append(
rolling.mean().rename(name_with_mwh))
if not args.graphs or 'power-max' in args.graphs:
graphs[loop_name + column_name +
' max'][result.subplot].append(
rolling.max().rename(name_with_mwh))
if not args.graphs or 'power-median' in args.graphs:
graphs[loop_name + column_name +
' median'][result.subplot].append(
rolling.median().rename(name_with_mwh))
if not args.graphs or 'power-min' in args.graphs:
graphs[loop_name + column_name +
' min'][result.subplot].append(
rolling.min().rename(name_with_mwh))
for key in sorted(graphs.keys()):
# print(key)
fig, axes = plt.subplots(nrows=len(graphs[key]),
ncols=1,
figsize=(18, 13),
squeeze=False,
subplotpars=matplotlib.figure.SubplotParams(
left=0.05,
right=0.8,
bottom=0.05,
top=0.9,
wspace=0.1,
hspace=0.2))
for idx, subplot in enumerate(sorted(graphs[key].keys())):
axis = axes[idx, 0]
if subplot:
axis.set_title('%s' % (subplot))
# df = pd.concat(graphs[key][subplot], axis=1)
for df in natsorted(graphs[key][subplot], key=attrgetter('name')):
df.plot(ax=axis, legend=args.legend)
axis.grid(True)
#axis.xaxis.set_major_formatter(time_ticks_formatter)
axis.xaxis.set_label_text('')
axis.yaxis.set_label_text('mW')
# print("Left %r, Right: %r" % (skip_head, skip_tail))
# axis.set_xlim(left=skip_head, right=skip_tail)
box = axis.get_position()
#axis.set_position([box.x0, box.y0, box.width * 0.7, box.height])
axis.legend(bbox_to_anchor=(1, 1),
loc='upper left',
prop={'size': 9})
fig.suptitle(key)
fig.savefig('%s.png' % (key.replace(' ', '_')))
fig.canvas.mpl_connect('pick_event', on_pick)
# NOTE: legacy helper, no longer called from main(); it still references the
# old Iteration API (iteration.power.loops, iteration.name, iteration.subplot).
def summarize(iterations, loop):
with pd.option_context('display.max_rows', None, 'display.max_columns',
None, 'display.float_format', '{:.0f}'.format,
'display.width', 120):
if loop:
            print('---------\nSummary for %s:' % (loop[2]))
run_name = '%s_loop-each.csv' % (loop[2])
sum_name = '%s_loop-summary.csv' % (loop[2])
else:
print('---------\nSummary:')
run_name = 'all_loops-each.csv'
sum_name = 'all_loops-summary.csv'
summaries = defaultdict(dict)
for iteration in iterations:
if loop not in iteration.power.loops:
continue
summary = iteration.power.loops[loop].summary
for metric in summary:
if not summary[metric].empty:
summaries[metric][iteration.name] = summary[metric]
if not loop:
if iteration.interrupts is not None:
summaries['interrupts'][
iteration.name] = iteration.interrupts
summaries['count'][iteration.name] = pd.Series(
{'interrupts': iteration.interrupts.sum()})
if not summaries:
return
#print(summaries)
for metric in summaries:
summaries[metric] = pd.concat(summaries[metric], axis=1)
# print(summaries.keys())
# print(summaries['count'])
summaries = pd.concat(summaries, axis=0)
summaries = summaries.transpose()
# Hack to sort the index
idx = pd.CategoricalIndex(summaries.index, natsorted(summaries.index))
summaries.index = idx
summaries.sort_index(inplace=True)
#print("%s\n" %(summaries))
summaries.to_csv(run_name, encoding='utf-8')
# totals = [file.loops[loop].data for file in files]
# totals = pd.concat(totals, ignore_index=True).describe()
# totals.drop(['count', '25%', '50%', '75%'], inplace=True)
#df = totals.groupby(lambda x: True).agg('mean', 'std', 'min')
# df = pd.DataFrame(totals).agg('mean', 'std', 'min')
#print("Totals:\n%s\n" % (totals))
#totals.to_csv(sum_name, encoding='utf-8')
#pd.options.display.float_format = None
#print("Means:\n%s\n" % (summaries['mean']))
subplot_keys = set()
for iteration in iterations:
subplot_keys.add(iteration.subplot)
for metric in ['mWh', 'count']:
if metric not in summaries:
continue
data = summaries[metric].copy()
subplots = []
for file_name in summaries.index:
for iteration in iterations:
if not iteration.name == file_name:
continue
subplots.append(iteration.subplot)
break
subplots = pd.Series(subplots, index=data.index)
data['subplot'] = subplots
data = data.groupby(['subplot'])
desc = data.describe()
print('%s\n' % (desc))
for subplot, group in data:
for col_name in group:
if col_name == 'subplot':
continue
col = group[col_name]
mean = desc[col_name][(subplot, 'mean')]
std = desc[col_name][(subplot, 'std')]
# There is not much variance so don't be noisy
if std / mean < 0.02:
continue
outliers = col[lambda x: np.abs(x - mean) > (.75 * std)]
if not outliers.empty:
print(
'%s\nOutliers for %s (mean: %.1f, std: %.1f):\n%s\n'
% (subplot, col_name, mean, std, outliers))
return summaries
#def print_interrupts(iterations):
def on_pick(event):
print('Picked: %s' % (event))
def main():
args = parse_args()
if args.range:
parts = args.range.split(':')
if len(parts) != 2:
print('--range %s is malformed' % (args.range))
sys.exit(1)
        # Default to None so the attributes always exist downstream.
        args.skip_head = float(parts[0]) if parts[0] else None
        args.skip_tail = float(parts[1]) if parts[1] else None
else:
args.skip_head = None
args.skip_tail = None
offsets = defaultdict(lambda: 0)
for parts in args.offset:
if len(parts) != 2:
            print('--offset %s is malformed' % ('='.join(parts)))
sys.exit(1)
offsets[parts[0]] = float(parts[1])
args.offset = offsets
#loops = []
#for idx, loop in enumerate(args.loops):
# parts = loop.split(':', 3)
# if len(parts) < 2:
# print('--loops %s is malformed' % (args.range))
# sys.exit(1)
# if len(parts) == 3:
# name = parts[2]
# else:
# name = 'Loop %s (%s:%s)' % (idx, parts[0], parts[1])
# loops.append((float(parts[0]), float(parts[1]), name))
    # The GIL kills most of the performance
#pool = concurrent.futures.ThreadPoolExecutor(5)
#iterations = list(pool.map(lambda file_name: Iteration(file_name, args),
# args.measurements))
iterations = [Iteration(folder_name, args) for folder_name in args.results]
#pprint.pprint(iterations)
# Drop empty files
# files = [file for file in allfiles if file.values]
#graphs = {}
#plots = defaultdict(lambda: defaultdict(list))
#print("%s:" % (file.name))
#print("%s" % (file.data.describe()))
#print("Mean: %s" % (file.data.mean(axis=0)))
all_loops = [None]
# TODO(rrangel): Fix loops
#all_loops.extend(loops)
# for loop in all_loops:
# summiries = summarize(iterations, loop)
# TODO(rrangel): Print summary graphs
#print("Description:")
#print("%s" % (summaries.describe()))
for iteration in iterations:
for result in iteration.results:
for loop in result.loops:
loop_name = loop.checkpoint['instance']
# print(f"Loop: {loop_name}")
# print(iterations)
if not args.graphs or 'top' in args.graphs:
print_top_graphs(iterations, args)
    if not args.graphs or any(
            graph.startswith('power') for graph in args.graphs):
        print_power_graphs(iterations, args)
plt.show()
sys.exit(1)
if __name__ == '__main__':
    main()
#!/bin/bash -e
# Script to configure your host with a ChromiumOS development checkout
sudo apt-get install -yqq git-core gitk git-gui git-email curl lvm2 \
thin-provisioning-tools python-pkg-resources python-virtualenv xz-utils \
python3.6
CHROMIUMOS_PATH="$HOME/chromiumos"
mkdir -p "$CHROMIUMOS_PATH"
cd "$CHROMIUMOS_PATH"
if ! command -v repo > /dev/null; then
if [[ ! -d "depot_tools" ]]; then
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
fi
if ! grep -q depot_tools "$HOME/.bashrc"; then
echo "export PATH=\$PATH:$CHROMIUMOS_PATH/depot_tools" >> "$HOME/.bashrc"
export "PATH=$PATH:$CHROMIUMOS_PATH/depot_tools"
fi
fi
if ! git config --global --get user.name > /dev/null; then
echo "Enter the name used for git commits: "
read -r gitname
if [[ -n "$gitname" ]]; then
git config --global user.name "$gitname"
fi
fi
if ! git config --global --get user.email > /dev/null; then
git config --global user.email "$USER@chromium.org"
fi
if command -v glinux-config > /dev/null; then
if [[ "$(glinux-config get custom_etc_sudoers_d)" = "recommended" ]]; then
sudo glinux-config set custom_etc_sudoers_d true
sudo glinux-updater
fi
fi
if command -v gsettings > /dev/null; then
# Disable auto mount
gsettings set org.cinnamon.desktop.media-handling automount false
fi
# NOTE: intentionally disabled via the leading `false`; kept for reference.
if false && [[ ! -f "/etc/sudoers.d/relax_requirements" ]]; then
cat > ./sudo_editor <<EOF
#!/bin/sh
[ "\$1" == "--" ] && shift # visudo might pass '--' arg
echo Defaults \!tty_tickets > \$1 # Entering your password in one shell affects all shells
echo Defaults timestamp_timeout=180 >> \$1 # Time between re-requesting your password, in minutes
EOF
chmod +x ./sudo_editor
sudo EDITOR=./sudo_editor visudo -f /etc/sudoers.d/relax_requirements
rm ./sudo_editor
fi
if ! git config --global --get http.cookiefile > /dev/null; then
echo "Navigate to https://chrome-internal.googlesource.com and click New Password"
echo "When you have the script press ENTER and an editor will open so you can paste the script"
read -r
$EDITOR set_cookie
bash -ex set_cookie
rm set_cookie
fi
echo -n "Testing internal git access... "
if git ls-remote https://chrome-internal.googlesource.com/chrome/src-internal.git > /dev/null; then
echo "Success"
else
echo "Failed connecting to https://chrome-internal.googlesource.com/chrome/src-internal.git"
echo "check your git cookies"
exit 1
fi
if [[ ! -d ".repo" ]]; then
repo init -u https://chrome-internal.googlesource.com/chromeos/manifest-internal.git \
--repo-url='https://chromium.googlesource.com/external/repo.git'
fi
if [[ ! -f "LICENSE" ]]; then
repo sync -j 8
fi
if [[ ! -d "chroot" ]]; then
cros_sdk --download
fi
echo "All setup"
echo "You need to \`source ~/.bashrc\` to update the PATH in your current shell."
echo "Use cros_sdk to enter the chroot"
echo "To build packages: cros_sdk -- ./build_packages --board=\${BOARD}"
echo "To build image: cros_sdk -- ./build_image --board=\${BOARD}"
echo "To flash image: cros_sdk -- cros flash --board=\${BOARD} usb://"