Skip to content

Instantly share code, notes, and snippets.

@smklein
Created July 20, 2017 22:13
Show Gist options
  • Save smklein/e332b2f2c2b92405c5c1aa4c424797f8 to your computer and use it in GitHub Desktop.
Hacky Filesystem Benchmarking scripts
#!/usr/bin/env sh
# Copyright 2017 The Fuchsia Authors
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT

# Hacky filesystem benchmark driver: builds Magenta with an 'autorun'
# script baked in, netboots a target device, scrapes benchmark results
# out of its console log, then hands them to benchmark_parse.py.
set -eu
# Mount point for the filesystem under test on the target device.
MOUNT_PATH="/benchmark"
# Block device on the target used for the MinFS run.
BLOCK_PATH="/dev/class/block/005"
# Netboot node name of the target device.
HOSTNAME="blurt-chip-axis-coral"
# Device-side command sequences ('\n' escapes are expanded when written
# into the autorun file below).
CMD_INITIALIZE="msleep 3000\nmkdir ${MOUNT_PATH}\numount data\n"
CMD_RUN_TEST="/boot/test/fs/fs-bench-test\nmsleep 250"
CMD_PREP_MINFS="mkfs ${BLOCK_PATH} minfs\nmount ${BLOCK_PATH} ${MOUNT_PATH}\n"
# Start from a fresh autorun script.
rm -f bench_autorun
# Markers scraped from the console log to detect failure / completion.
FAIL_RESULT="\[FAILED\]"
GOLD_RESULT="ALL BENCHMARKS COMPLETE"
GOLD_PREFIX="BENCHMARKS BEGIN"
GOLD_SUFFIX="BENCHMARKS END"
# --- Generate the device-side autorun script --------------------------------
# NOTE: the original used 'echo -e', which is not POSIX and is a no-op flag
# under some /bin/sh implementations (printing a literal "-e"). printf '%b\n'
# portably expands the embedded '\n' escapes in the CMD_* variables.
# Initialize
printf '%b\n' "${CMD_INITIALIZE}" >> bench_autorun
# MemFS (benchmarks run against the in-memory filesystem first)
printf '%b\n' "echo 'MemFS ${GOLD_PREFIX}'" >> bench_autorun
printf '%b\n' "${CMD_RUN_TEST}" >> bench_autorun
printf '%b\n' "echo 'MemFS ${GOLD_SUFFIX}'\n" >> bench_autorun
# MinFS (format + mount the block device, then rerun the same benchmark)
printf '%b\n' "${CMD_PREP_MINFS}" >> bench_autorun
printf '%b\n' "echo 'MinFS ${GOLD_PREFIX}'" >> bench_autorun
printf '%b\n' "${CMD_RUN_TEST}" >> bench_autorun
printf '%b\n' "echo 'MinFS ${GOLD_SUFFIX}'\n" >> bench_autorun
# Completion string (so we know we're done)
printf '%b\n' "msleep 1000" >> bench_autorun
printf '%b\n' "echo '${GOLD_RESULT}'" >> bench_autorun
# --- Build Magenta (autorun baked in) and boot the target --------------------
echo "Building Magenta (with benchmarking autorun)"
cd "${FUCHSIA_ROOT}/magenta"
USER_AUTORUN=bench_autorun make -j32
BUILD_DIR="${FUCHSIA_ROOT}/magenta/build-magenta-pc-x86-64"
TOOLS_DIR="${BUILD_DIR}/tools"
LOGFILE="${FUCHSIA_ROOT}/magenta/out.txt"
CMD_BOOTSERVER="${TOOLS_DIR}/bootserver ${BUILD_DIR}/magenta.bin"
CMD_LOGLISTENER="${TOOLS_DIR}/loglistener"
echo "Launching Bootserver + Loglistener"
rm -f "${LOGFILE}"
touch "${LOGFILE}"
${CMD_BOOTSERVER} &
PID_BOOTSERVER=$!
# BUG FIX: 'cmd &> file' is a bash-ism. Under /bin/sh it parses as
# 'cmd &' followed by '> file', so the log was never captured. Use the
# portable redirection form instead.
${CMD_LOGLISTENER} > "${LOGFILE}" 2>&1 &
PID_LOGLISTENER=$!
CMD_RESTART="${TOOLS_DIR}/netruncmd ${HOSTNAME} dm reboot"
echo "Restarting Device (assumed on)"
${CMD_RESTART}
# --- Poll the console log for the success / failure markers ------------------
FAIL=false
DONE=false
echo "Waiting for tests to pass..."
while [ "${DONE}" = false ]; do
# grep -q: we only want the exit status, not the matched lines echoed
# to the console once per second.
if grep -q "${GOLD_RESULT}" "${LOGFILE}"
then
echo " Tests passed!"
DONE=true
fi
if grep -q "${FAIL_RESULT}" "${LOGFILE}"
then
echo " BENCHMARK FAILURE"
FAIL=true
DONE=true
fi
# Skip the pointless final sleep once a terminal marker has been seen.
[ "${DONE}" = true ] || sleep 1
done
# --- Tear down, rebuild without autorun, and upload results ------------------
kill "${PID_BOOTSERVER}"
kill "${PID_LOGLISTENER}"
echo "Rerunning make without 'autorun'"
make -j32
if [ "${FAIL}" = false ]; then
echo "Parsing and uploading results"
python benchmark_parse.py --filesystems "MemFS" "MinFS" --gold-prefix "${GOLD_PREFIX}" --gold-suffix "${GOLD_SUFFIX}"
fi
cd -
# BUG FIX: previously the script always exited 0; propagate the benchmark
# outcome so callers (e.g. CI) can detect failure.
if [ "${FAIL}" = true ]; then
exit 1
fi
#!/usr/bin/env python
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script intended to parse and upload data to Google Sheets
from __future__ import print_function
import argparse
import datetime
import httplib2
import os
import subprocess
from apiclient import discovery
from apiclient import errors as apierrors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# Parse command-line flags. The parser must inherit oauth2client's standard
# flags (--noauth_local_webserver, etc.) via parents=[tools.argparser] so
# that get_credentials() can forward them to tools.run_flow().
# BUG FIX: the original built a parser with those parents, bound it to
# 'args', then immediately overwrote it with a fresh parent-less parser --
# dropping the oauth2client flags entirely. It also re-imported argparse,
# which is already imported at the top of the file.
try:
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    parser.add_argument("--filesystems", nargs="+", type=str, required=True,
                        help="List of filesystems which we are benchmarking")
    parser.add_argument("--gold-prefix", required=True,
                        help="String identifier before benchmarking output")
    parser.add_argument("--gold-suffix", required=True,
                        help="String identifier after benchmarking output")
    args = parser.parse_args()
except ImportError:
    args = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
# OAuth2 scope: full read/write access to the user's spreadsheets.
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
# OAuth2 client secret, expected in the current working directory.
CLIENT_SECRET_FILE = 'client_secret.json'
# User-agent string reported to Google during the OAuth2 flow.
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
def get_credentials():
    """Fetch (or interactively obtain) OAuth2 user credentials.

    Credentials are cached under ~/.credentials; when the cache is empty
    or stale, the OAuth2 flow is completed and its result is stored.

    Returns:
        Credentials, the obtained credential.
    """
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(
        credential_dir, 'sheets.googleapis.com-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if credentials and not credentials.invalid:
        return credentials
    # No usable cached credentials -- run the interactive OAuth2 flow.
    flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.user_agent = APPLICATION_NAME
    if args:
        credentials = tools.run_flow(flow, store, args)
    else:  # Needed only for compatibility with Python 2.6
        credentials = tools.run(flow, store)
    print('Storing credentials to ' + credential_path)
    return credentials
def _parse_benchmark_results(logfile, fs, gold_prefix, gold_suffix,
                             metadata_names, metadata_values):
    """Extract benchmark results for one filesystem from the device log.

    Args:
        logfile: path to the captured console log.
        fs: filesystem name (e.g. "MinFS") whose section should be read.
        gold_prefix / gold_suffix: markers bracketing that section.
        metadata_names / metadata_values: common columns prepended to every
            benchmark's name/value rows.

    Returns:
        A list of [benchmark_name, names, values] triples.
    """
    results = []
    with open(logfile) as infile:
        active = False
        current_benchmark_name = ""
        names = metadata_names[:]
        values = metadata_values[:]
        for line in infile:
            # Only look for benchmarks relevant to this filesystem.
            if fs + " " + gold_prefix in line:
                active = True
            elif fs + " " + gold_suffix in line:
                break
            if not active:
                continue
            if "Benchmarking " in line:
                # Benchmark begin.
                current_benchmark_name = line.split("Benchmarking ")[1].strip()
            elif "Benchmark " in line:
                # Sub-benchmark result: "Benchmark <name>: ... [ 123 ] ..."
                result = line.split("Benchmark ")[1]
                bench_subname = result.split(":")[0].strip()
                # Only observe the section between "[" and "]", then
                # concatenate the digits in that span into an int (msec).
                str_int = result.split(":")[1]
                str_int = str_int[str_int.index("[") + 1:str_int.rindex("]")]
                msec = int(''.join(c for c in str_int if c.isdigit()))
                names.append(bench_subname)
                values.append(msec)
            elif "PASSED" in line and current_benchmark_name != "":
                # Benchmark complete!
                results.append([current_benchmark_name, names, values])
                names = metadata_names[:]
                values = metadata_values[:]
                current_benchmark_name = ""
    return results


def _upload_benchmark(service, spreadsheet_id, fs, benchmark):
    """Create the per-benchmark sheet if needed and write one results row."""
    bname, bench_subnames, bench_subtimes = benchmark
    sheetName = fs + ', ' + bname
    body = {
        "requests": [{
            "addSheet": {
                "properties": {
                    "title": sheetName,
                    # Deterministic pseudo-random tab color per filesystem.
                    "tabColor": {
                        "red": float(hash(fs + "red") % 100) / 100,
                        "blue": float(hash(fs + "blue") % 100) / 100,
                        "green": float(hash(fs + "green") % 100) / 100,
                    }
                },
            },
        }]
    }
    try:
        service.spreadsheets().batchUpdate(
            spreadsheetId=spreadsheet_id, body=body).execute()
    except apierrors.HttpError as e:
        # An HttpError is expected when the sheet already exists.
        # BUG FIX: the original printed 'result' here, which is unbound
        # when batchUpdate() raises, turning the handled error into a
        # masking NameError.
        print(e)
        print("Ignoring Http error...")
    # Write the sub-benchmark NAMES into the sheet's header row.
    # BUG FIX: the Sheets API requires 'data' to be a LIST of ValueRange
    # objects; the original passed a single dict.
    body = {
        "valueInputOption": "USER_ENTERED",
        "data": [{
            "range": sheetName + "!A1",
            "values": [bench_subnames],
        }]
    }
    service.spreadsheets().values().batchUpdate(
        spreadsheetId=spreadsheet_id, body=body).execute()
    # Append the timing VALUES alongside the names.
    service.spreadsheets().values().append(
        spreadsheetId=spreadsheet_id, range=sheetName + '!B1',
        valueInputOption="USER_ENTERED",
        body={'values': [bench_subtimes]}).execute()


def main():
    """Parse the benchmark log and upload per-filesystem results to Sheets."""
    # Set up credentials; identify target spreadsheet.
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)
    spreadsheetId = '1LIsmPxYOIZIcDr3eBGVmjkVIY8XPXCoMmJXNHG6LA2Y'
    # Metadata columns recorded with every benchmark row.
    gitrev = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
    gitmsg = subprocess.check_output(
        ["git", "show", "-s", "--format=%s", gitrev]).strip()
    metadata_names = ["Mx Git Rev", "Mx Git Msg", "Date"]
    metadata_values = [gitrev, gitmsg, datetime.datetime.now().isoformat()]
    for fs in args.filesystems:
        print("Observing fs: " + fs)
        results = _parse_benchmark_results("out.txt", fs, args.gold_prefix,
                                           args.gold_suffix,
                                           metadata_names, metadata_values)
        for benchmark in results:
            _upload_benchmark(service, spreadsheetId, fs, benchmark)


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment