Skip to content

Instantly share code, notes, and snippets.

/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
@ajtulloch
ajtulloch / Untitled60.ipynb
Last active December 6, 2019 21:10
nn.Linear error analysis
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@ajtulloch
ajtulloch / Block-Sparse GEMM.ipynb
Last active August 28, 2019 12:07
Block-Sparse GEMM.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
from tvm import relay
from mxnet.gluon import nn
import mxnet as mx
class TestBlock(nn.HybridBlock):
    """Minimal Gluon block: convolution -> ReLU -> 2x2 max-pooling.

    Layer attributes keep their original names (``conv``, ``a000``,
    ``a0_0``) so any code that inspects or collects parameters by
    attribute name still works.
    """

    def __init__(self):
        super(TestBlock, self).__init__()
        # Positional args presumably map to (channels=8, kernel_size=3,
        # strides=1, padding=1) per the Gluon Conv2D signature — confirm.
        self.conv = nn.Conv2D(8, 3, 1, 1, use_bias=False)
        self.a000 = nn.Activation("relu")
        self.a0_0 = nn.MaxPool2D(pool_size=2, strides=2)
diff --git a/src/relay/pass/quantize.cc b/src/relay/pass/quantize.cc
index 3a2e54c8..4059dc3a 100644
--- a/src/relay/pass/quantize.cc
+++ b/src/relay/pass/quantize.cc
@@ -340,18 +340,9 @@ Expr MulRealize(const Call& ref_call,
const auto* rhs = new_args[1].as<QRealizeIntExprNode>();
Expr ldata = lhs->data;
Expr rdata = rhs->data;
-
DataType dtype = cfg->dtype_activation;
@ajtulloch
ajtulloch / Untitled.ipynb
Created June 7, 2019 21:45
Untitled.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@ajtulloch
ajtulloch / Untitled41.ipynb
Last active April 30, 2019 02:49
RelayTVMFusionE2E.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
diff --git a/tutorials/optimize/opt_gemm.py b/tutorials/optimize/opt_gemm.py
index 44ee53a7..c9785cbf 100644
--- a/tutorials/optimize/opt_gemm.py
+++ b/tutorials/optimize/opt_gemm.py
@@ -44,24 +44,24 @@ import timeit
# The size of the matrix
# (M, K) x (K, N)
# You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL.
-M = 1024
-K = 1024
@ajtulloch
ajtulloch / -
Created June 15, 2018 06:43
opt_gemm.diff
diff --git a/tutorials/optimize/opt_gemm.py b/tutorials/optimize/opt_gemm.py
index 44ee53a7..c9785cbf 100644
--- a/tutorials/optimize/opt_gemm.py
+++ b/tutorials/optimize/opt_gemm.py
@@ -44,24 +44,24 @@ import timeit
# The size of the matrix
# (M, K) x (K, N)
# You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL.
-M = 1024
-K = 1024
#! /usr/bin/env python
"""Smoke-test pexpect.replwrap against a Lua REPL: evaluate 1 + 1."""
import pexpect
import pexpect.replwrap

# Lua's interactive interpreter uses "> " as both the initial and the
# continuation prompt, hence the same string twice; None skips sending
# any prompt-change command.
repl = pexpect.replwrap.REPLWrapper("lua", u"> ", None, u"> ")
# In the Lua REPL, "= expr" prints the value of expr.  The first line of
# the captured output is the echoed command, so drop it.
output = repl.run_command("= 1 + 1", timeout=1).splitlines()[1:]
# Plain statement-form assert (assert(...) reads like a call and, with a
# second element, would silently become an always-true tuple).
assert int(output[0]) == 2, "expected Lua to evaluate 1 + 1 to 2"