import tvm
from tvm import relay, autotvm
import larq_zoo
import numpy as np
# import tvm.contrib.graph_runtime as runtime
import tvm.contrib.debugger.debug_runtime as runtime
import tvm.relay.testing
from tvm.autotvm.tuner import RandomTuner
import logging
import sys
import numpy as np
import tvm
import tvm.topi.testing
from tvm import te, testing
from tvm.topi.utils import get_const_tuple
from tvm import autotvm, topi
Computational DAG:
data = PLACEHOLDER [1, 2048]
weight = PLACEHOLDER [1000, 2048]
T_dense(i, j) += (data[i, k]*weight[j, k])
bias = PLACEHOLDER [1000]
T_add(i, j) = (T_dense[i, j] + bias[j])
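
A minimal TE sketch of the DAG printed above, assuming the shapes from the PLACEHOLDER lines; the schedule is just the default one, not the tuned result:

import tvm
from tvm import te

data = te.placeholder((1, 2048), name="data")
weight = te.placeholder((1000, 2048), name="weight")
bias = te.placeholder((1000,), name="bias")

# T_dense(i, j) += data[i, k] * weight[j, k]
k = te.reduce_axis((0, 2048), name="k")
T_dense = te.compute(
    (1, 1000),
    lambda i, j: te.sum(data[i, k] * weight[j, k], axis=k),
    name="T_dense",
)
# T_add(i, j) = T_dense(i, j) + bias(j)
T_add = te.compute((1, 1000), lambda i, j: T_dense[i, j] + bias[j], name="T_add")

s = te.create_schedule(T_add.op)
func = tvm.build(s, [data, weight, bias, T_add], target="llvm")
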
----------------------------------------------------------------------
------------------------------ [ Search ]
----------------------------------------------------------------------
{"input": ["llvm -device=arm_cpu -target=aarch64-unknown-linux-gnu -mattr=+neon", "depthwise_conv2d_NCHWc.x86", [["TENSOR", [1, 576, 16, 16], "int16"], ["TENSOR", [576, 1, 3, 3], "int16"], [1, 1], [0, 0, 0, 0], [1, 1], "NCHW", "NCHW", "int32"], {}], "config": {"index": 948, "code_hash": null, "entity": [["tile_ic", "sp", [-1, 4]], ["tile_oc", "sp", [-1, 4]], ["tile_ow", "sp", [-1, 7]], ["unroll_kw", "ot", true]]}, "result": [[0.000205725], 0, 0.3024606704711914, 1587601624.52548], "version": 0.2, "tvm_version": "0.7.dev1"}
{"input": ["llvm -device=arm_cpu -target=aarch64-unknown-linux-gnu -mattr=+neon", "depthwise_conv2d_NCHWc.x86", [["TENSOR", [1, 192, 30, 30], "int16"], ["TENSOR", [192, 1, 3, 3], "int16"], [1, 1], [0, 0, 0, 0], [1, 1], "NCHW", "NCHW", "int32"], {}], "config": {"index": 572, "code_hash": null, "entity": [["tile_ic", "sp", [-1, 96]], ["tile_oc", "sp", [-1, 96]], ["tile_ow", "sp", [-1, 4]], ["unroll_kw", "ot", true]]}, "result": [[0.0002175626], 0, 3.8473615646362305, 1587602877.3487854], "ver
B _build/lib/
B _build/lib_test/
B _build/benchmarks/*
B /home/masa/.opam/4.07.1+BER/lib/asp/
B /home/masa/.opam/4.07.1+BER/lib/letrec/
CMT /home/masa/projects/languages/ocaml/research/metaocaml/yallop/metaocaml-letrec/_build/lib
B /home/masa/projects/languages/ocaml/research/metaocaml/yallop/metaocaml-letrec/_build/lib
S /home/masa/projects/languages/ocaml/research/metaocaml/yallop/metaocaml-letrec/lib
S lib
S lib_test
B _build/lib/
B _build/lib_test/
B /home/masa/.opam/4.07.1+BER/lib/letrec/
CMT /home/masa/.opam/4.07.1+BER/lib/ocaml/compiler-libs
B /home/masa/.opam/4.07.1+BER/lib/ocaml/compiler-libs
S lib
S lib_test
EXT meta
PKG delimcc
FLG -open Trx
fn (%v54: Tensor[(16, 3), float32], %v60: Tensor[(16), float32], %v61: Tensor[(16), float32], %v66: Tensor[(16, 4), float32], %v72: Tensor[(16), float32], %v73: Tensor[(16), float32], %v94: Tensor[(4), float32], %v95: Tensor[(4), float32], %states: List[List[(Tensor[(2, 4), float32], Tensor[(2, 4), float32])]], %input: Tensor[(5, 2, 3), float32], %v135: Tensor[(16, 3), float32], %v141: Tensor[(16), float32], %v142: Tensor[(16), float32], %v147: Tensor[(16, 4), float32], %v153: Tensor[(16), float32], %v154: Tensor[(16), float32], %v175: Tensor[(4), float32], %v176: Tensor[(4), float32], %v222: Tensor[(16, 4), float32], %v228: Tensor[(16), float32], %v229: Tensor[(16), float32], %v234: Tensor[(16, 4), float32], %v240: Tensor[(16), float32], %v241: Tensor[(16), float32], %v262: Tensor[(4), float32], %v263: Tensor[(4), float32], %v303: Tensor[(16, 4), float32], %v309: Tensor[(16), float32], %v310: Tensor[(16), float32], %v315: Tensor[(16, 4), float32], %v321: Tensor[(16), float32], %v322: Tensor[(16), float32], %
fn (%v45: Tensor[(16, 3), float32], %v51: Tensor[(16), float32], %v52: Tensor[(16), float32], %v57: Tensor[(16, 4), float32], %v63: Tensor[(16), float32], %v64: Tensor[(16), float32], %v85: Tensor[(4), float32], %v86: Tensor[(4), float32], %states: List[(Tensor[(2, 4), float32], Tensor[(2, 4), float32])], %input: Tensor[(5, 2, 3), float32], %v119: Tensor[(16, 3), float32], %v125: Tensor[(16), float32], %v126: Tensor[(16), float32], %v131: Tensor[(16, 4), float32], %v137: Tensor[(16), float32], %v138: Tensor[(16), float32], %v159: Tensor[(4), float32], %v160: Tensor[(4), float32]) -> (Tensor[(?, 2, ?), float32], List[(Tensor[(2, 4), float32], Tensor[(2, 4), float32])]) {
%0 = Nil /* ty=List[Tensor[(?, 2, ?), float32]] */;
%1 = Nil /* ty=List[Tensor[(2, 4), float32]] */;
%2 = @nth(%states, 0 /* ty=int32 */) /* ty=(Tensor[(2, 4), float32], Tensor[(2, 4), float32]) */;
%38 = (
let %while_loop: fn (int32, List[Tensor[(2, 4), float32]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32]), Tensor[(5, 2, 3
ANTLR runtime and generated code versions disagree: 4.8!=4.7.2
graph(%self : __torch__.custom_lstms.StackedLSTM,
%input.1 : Tensor,
%states.1 : (Tensor, Tensor)[]):
%i.2 : int = prim::Constant[value=0]() # /home/masa/projects/dev/torchscript-to-tvm/custom_lstms.py:80:8
%i.3 : int = prim::Constant[value=1]() # /home/masa/projects/dev/torchscript-to-tvm/custom_lstms.py:80:8
%output_states.1 : (Tensor, Tensor)[] = prim::ListConstruct()
%6 : __torch__.torch.nn.modules.container.ModuleList = prim::GetAttr[name="layers"](%self)
%7 : __torch__.custom_lstms.LSTMLayer = prim::GetAttr[name="0"](%6)
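
A self-contained sketch of the pipeline that produces dumps in the two formats above: torch.jit.script yields a graph(...) listing like the TorchScript IR, and relay.frontend.from_pytorch yields fn(...) listings like the Relay IR. The tiny model, input name, and shape below are assumptions for illustration, not the gist's custom_lstms.StackedLSTM:

import torch
import torch.nn as nn
import tvm
from tvm import relay

class TinyModel(nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0

scripted = torch.jit.script(TinyModel())
print(scripted.graph)  # TorchScript IR, same format as the graph(...) dump above

# Shape chosen to match the Tensor[(5, 2, 3), float32] input in the Relay dumps.
shape = (5, 2, 3)
mod, params = relay.frontend.from_pytorch(scripted, [("x", shape)])
print(mod["main"])  # Relay IR, same format as the fn(...) dumps above
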