Skip to content

Instantly share code, notes, and snippets.

View zxybazh's full-sized avatar

Xiyou Zhou zxybazh

  • OctoML
  • Seattle
View GitHub Profile
x=["SyBPtQfAZ","H1S8UE-Rb","S1sRrN-CW","Syt0r4bRZ","HkPCrEZ0Z","rJ5C67-C-","H1T2hmZAb","Hymt27b0Z",
"HJ5AUm-CZ","r1nzLmWAb","HkGJUXb0-","SkERSm-0-","BJlrSmbAZ","HJXyS7bRb","SyhRVm-Rb","SkwAEQbAb",
"B1mvVm-C-","S1TgE7WR-","H1DkN7ZCZ","SJ71VXZAZ","ryk77mbRZ","HJIhGXWCZ","BJInMmWC-","H1I3M7Z0b",
"Bk-ofQZRb","SJx9GQb0-","BJoBfQ-0b","SJyVzQ-C-","HJNGGmZ0Z","H1kMMmb0-","HkGbzX-AW","rJIgf7bAZ",
"SyCyMm-0W","r1ayG7WRZ","H1Nyf7W0Z","HkCvZXbC-","ByED-X-0W","ByuI-mW0W","H1BHbmWCZ","SkqV-XZRZ",
"rk07ZXZRb","HJCXZQbAZ","H1bbbXZC-","rkaqxm-0b","S1XolQbRW","B1TYxm-0-","Bkftl7ZCW","SyBBgXWAZ",
"SkrHeXbCW","S1ANxQW0b","ByOExmWAb","By4Nxm-CW","r1l4eQW0Z","B12QlQWRW","ry831QWAb","B1EGg7ZCb",
"HyMTkQZAb","rJ6iJmWCW","rkZB1XbRZ","HJnQJXbC-","Sy3fJXbA-","HJ8W1Q-0Z","HknbyQbC-","BkrsAzWAb",
"ryH20GbRW","r1HhRfWRZ","B1KFAGWAZ","Byht0GbRZ","B1hYRMbCW","S1q_Cz-Cb","BJ7d0fW0b","HyydRMZC-",
"SyZI0GWCZ","rJSr0GZR-","ryZERzWCZ","rkeZRGbRW","ryazCMbR-","Hyig0zb0Z","H11lAfbCW","HkXWCMbRW",
import org.junit.Before;
import org.junit.Test;
public class ClassRosterTester {
private RosterManager roster;
@Before
public void initializeRoster() {
roster = new RosterManager();
@zxybazh
zxybazh / MaxGpuMem.sh
Created April 17, 2020 05:15
Script to record the maximum GPU memory usage of the first card using nvidia-smi over a time period.
#!/bin/bash
# Poll nvidia-smi every half second and report the maximum GPU memory
# usage (in MiB) observed so far for the first matching card.
#
# NOTE(review): the grep pattern "11019MiB" matches this card's total
# memory as printed by nvidia-smi; a card with a different total needs a
# different pattern -- TODO confirm against local nvidia-smi output.
max=0
while true; do
    # Extract the "used" MiB figure from the first matching nvidia-smi row.
    tmp=$(nvidia-smi | grep 11019MiB | awk '{split($0,a,"|"); print a[3]}' | awk '{split($0, a, "/"); print a[1]}' | awk 'NR==1{print $1}' | awk '{split($0, a, "M"); print a[1]}')
    # Bug fix: the original used [[ "$tmp" > "$max" ]], which compares
    # lexicographically (so e.g. "9" > "1000"); use arithmetic comparison
    # instead, and skip the update when the parse produced nothing.
    if [[ -n "$tmp" ]] && (( tmp > max )); then
        max=$tmp
    fi
    echo "$max" "MiB"
    sleep .5
done
@zxybazh
zxybazh / GenFailCaseNum.py
Created April 27, 2021 03:48
Count the failure cases for the given situation; divide the result by C(52, 8)*C(8, 4) to obtain the failure probability.
ff = []
def f(x):
    """Return entry ``x`` of the module-level table ``ff``.

    Presumably ``ff`` holds precomputed factorials (it is seeded with 1
    and filled by a loop further down the gist) -- verify against the
    full script before relying on that.
    """
    entry = ff[x]
    return entry
def c(n, m):
    """Return the binomial coefficient C(n, m) = n! / (m! * (n - m)!).

    Factorials come from the sibling lookup ``f``.  Fix: the original
    used true division (``/``), which converts the huge factorial
    integers (e.g. 52!) to floats and loses exactness; floor division
    keeps the result an exact integer.  Both divisions are exact because
    m! and (n - m)! each divide n!.
    """
    return f(n) // f(m) // f(n - m)
ff.append(1)
for i in range(1, 100):
@zxybazh
zxybazh / test_compute_at.py
Last active February 7, 2022 23:17
The test script for failing compute at.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@zxybazh
zxybazh / test_compute_at_cuda.py
Last active February 4, 2022 10:20
The cuda test script for failing compute at.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@zxybazh
zxybazh / cuda log.py
Created March 7, 2022 23:59
Tune_relay cuda tuning log.
This file has been truncated, but you can view the full file.
INFO:tvm.meta_schedule.tune:Working directory: /tmp/tmppqhiwht0
INFO:tvm.meta_schedule.tune:Before task deduplication: 20 tasks
INFO:tvm.meta_schedule.tune:After task deduplication: 20 tasks
INFO:tvm.meta_schedule.builder.local_builder:LocalBuilder: max_workers = 24
INFO:tvm.meta_schedule.runner.local_runner:LocalRunner: max_workers = 1
[14:36:09] /home/zxybazh/tvm-tensorir/src/meta_schedule/task_scheduler/task_scheduler.cc:96: Initializing Task #0: "vm_mod_fused_layout_transform", mod =
# from tvm.script import tir as T
@tvm.script.ir_module
class Module:
@T.prim_func