Skip to content

Instantly share code, notes, and snippets.

View yzhliu's full-sized avatar

Yizhi Liu yzhliu

  • Boson AI
  • Bay Area, the United States
View GitHub Profile
@yzhliu
yzhliu / hou_pre.c
Created April 25, 2012 04:22
precompile hou.c
double r;
int main(int argc, char **argv)
{
if (argc < 2)
{
if (argc == 2)
printf("%f\n", r);
return argc >= 4 + 0;
}
{
@yzhliu
yzhliu / hou.c
Created April 25, 2012 04:32
a calculator from Qiming Hou
#include <stdio.h>
#include <math.h>
/* NOTE(review): deliberately obfuscated, IOCCC-style calculator fragment
 * ("a calculator from Qiming Hou"). The macros below hijack identifiers and
 * even the `return` keyword; they only make sense expanded into the full
 * hou.c, which is not visible here -- do not "clean up" without it. */
/* Scans a number plus its trailing operator char out of the input string
 * with sscanf; the `main(...)` call appears to recurse for parenthesised
 * sub-expressions -- TODO confirm against the complete source. */
#define clear 1;if(c>=11){c=0;sscanf(_,"%lf%c",&r,&c);while(*++_-c);}\
else if(argc>=4&&!main(4-(*_++=='('),argv))_++;g:c+=
/* Shadows the standard puts(); `return` inside the replacement text expands
 * through the `return` macro below at each use site, not at define time. */
#define puts(d,e) return 0;}{double a;int b;char c=(argc<4?d)&15;\
b=(*_%__LINE__+7)%9*(3*e>>c&1);c+=
/* Uses the stringize operator (#d) to compare the operator token's first
 * character against the current input character. */
#define I(d) (r);if(argc<4&&*#d==*_){a=r;r=usage?r*a:r+a;goto g;}c=c
/* Hijacks the `return` keyword: when argc==2, prints the accumulated
 * result r before the real return executes. */
#define return if(argc==2)printf("%f\n",r);return argc>=4+
/* Recursive re-entry into main; the mode is derived from __LINE__. */
#define usage main(4-__LINE__/26,argv)
#define calculator *_*(int)
@yzhliu
yzhliu / thrift-0.6.1-osx-10.10.2.patch
Last active August 29, 2015 14:16
Thrift-0.6.1 source patch for OSX 10.9+ (./configure --with-ruby=no CXXFLAGS="-std=c++11")
diff --git a/compiler/cpp/src/generate/t_rb_generator.cc b/compiler/cpp/src/generate/t_rb_generator.cc
index c9db29a..f4029d0 100644
--- a/compiler/cpp/src/generate/t_rb_generator.cc
+++ b/compiler/cpp/src/generate/t_rb_generator.cc
@@ -319,7 +319,11 @@ void t_rb_generator::generate_enum(t_enum* tenum) {
for(c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
// Populate the hash
int value = (*c_iter)->get_value();
- first ? first = false : f_types_ << ", ";
+ if (first) {
@yzhliu
yzhliu / run-mxnet-on-spark.sh
Last active May 1, 2016 15:22
Run mxnet training for MNIST on spark standalone
#!/bin/bash
# Run mxnet training for MNIST on a Spark standalone cluster (fragment:
# the jar-collection loop and the actual spark-submit follow below).
# Absolute path of the directory containing this script.
CURR_DIR=$(cd `dirname $0`; pwd)
# Repository root: two levels above the script directory.
ROOT_DIR=$(cd $CURR_DIR/../../; pwd)
# Install prefix of the mxnet module on the cluster nodes.
MODULE_DIR=/opt/mxnet
# Directory of dependency jars shipped with the module.
LIB_DIR=${MODULE_DIR}/lib
# Spark application jar (mxnet-spark) and the mxnet native "full" jar
# (linux-x86_64, CPU build) that seeds the --jars list.
JAR=${MODULE_DIR}/mxnet-spark_2.10-0.1.2-SNAPSHOT.jar
LIBS=${MODULE_DIR}/mxnet-full_2.10-linux-x86_64-cpu-0.1.2-SNAPSHOT.jar
for jar in `ls ${LIB_DIR}/*.jar`; do
[ ! -z ${LIBS} ] && LIBS="${LIBS},"
@yzhliu
yzhliu / infer_lstm.py
Created June 29, 2016 01:56
infer_lstm.py
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import lstm_unroll
from bucket_io import default_build_vocab
from rnn_model import LSTMInferenceModel
def make_input(char, vocab, arr):
@yzhliu
yzhliu / bench-mkldnn-conv.txt
Last active January 18, 2018 00:09
benchdnn for conv
https://github.com/01org/mkl-dnn/tree/master/tests/benchdnn
string: perf
convolution name
full conv-desc
number of giga ops calculated
effective cpu frequency in GHz (amb clocks[min] / time[min])
minimum time spent in ms
best gigaops (since it corresponds to minimum time)
average time spent in ms
average gigaops (since it corresponds to average time)
@yzhliu
yzhliu / conv2d-x86-1.py
Created January 3, 2018 01:56
conv2d x86 bench with tvm sch-1
@generic.schedule_conv2d_nchw.register(["cpu"])
def schedule_conv2d(outs):
print('Run in x86 sch ...')
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
@generic.schedule_conv2d_nchw.register(["cpu"])
def schedule_conv2d(outs):
print('Run in x86 sch ...')
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
import tvm
import numpy
import time
# Matrix dimension used by the benchmark set up below.
# NOTE(review): N = 1e8 looks implausibly large for an N x N square
# matrix; presumably a total element count or a typo -- confirm against
# the rest of the script.
N = 100000000
# Element dtype for the tvm tensors in this benchmark.
dtype = "float32"
# LLVM compilation target; swap in the commented line to target
# skylake-avx512 and enable AVX-512 code generation.
target = "llvm"
#target = "llvm -mcpu=skylake-avx512"
// attr [data_vec] storage_scope = "global"
allocate data_vec[float32 * 1 * 56 * 8 * 64 * 3 * 9]
// attr [kernel_vec] storage_scope = "global"
allocate kernel_vec[float32 * 16 * 64 * 3 * 3 * 4]
produce data_vec {
// attr [iter_var(h.outer, )] pragma_scope = "parallel_launch_point"
// attr [iter_var(h.outer, )] pragma_scope = "parallel_barrier_when_finish"
for (h.outer, 0, 28) {
// attr [iter_var(h.inner, )] pragma_scope = "parallel_stride_pattern"
parallel (h.inner, 0, 2) {