Gists by Cody Yu (@comaniac)
comaniac / tvm_settings.json (last active November 20, 2019)
VSCode Settings for TVM
{
  "python.pythonPath": "/usr/local/bin/python3",
  "python.testing.nosetestArgs": [
    "tests"
  ],
  "python.testing.pytestEnabled": false,
  "python.testing.unittestEnabled": false,
  "python.testing.nosetestsEnabled": false,
  "python.testing.promptToConfigure": false,
  "python.linting.pylintEnabled": true
}
comaniac / lorien_vscode_settings.json (created February 22, 2020)
Rename this file and place it at <path-to-lorien>/.vscode/settings.json
{
  "python.pythonPath": "/usr/bin/python3",
  "python.linting.mypyEnabled": true,
  "python.linting.pylintEnabled": true,
  "python.linting.pylintCategorySeverity.convention": "Warning",
  "python.linting.pylintCategorySeverity.refactor": "Warning",
  "python.linting.pylintCategorySeverity.warning": "Warning",
  "python.linting.pylintArgs": [
    "--rcfile",
    "${workspaceRoot}/tests/lint/pylintrc"
  ]
}
import tvm
from tvm import relay
from tvm.relay.dataflow_pattern import *
#class ExtractArgs(tvm.relay.ExprMutator):
# # Mutate the graph to replace the matched graph input with new vars.
# def __init__(self, mapped_vars):
# super(ExtractArgs, self).__init__()
# self.mapped_vars = set(mapped_vars)
# self.vars = []
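A minimal usage sketch of the dataflow pattern API imported above, assuming a TVM version that ships tvm.relay.dataflow_pattern; the shapes and operators are illustrative, not from the gist:

# Hedged sketch: match a conv2d -> relu chain with tvm.relay.dataflow_pattern.
conv_pat = is_op("nn.conv2d")(wildcard(), wildcard())
relu_pat = is_op("nn.relu")(conv_pat)

x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.var("w", shape=(16, 3, 3, 3))
expr = relay.nn.relu(relay.nn.conv2d(x, w))
assert relu_pat.match(expr)  # True: the expression matches the pattern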
[style]
# Align closing bracket with visual indentation.
align_closing_bracket_with_visual_indent=True
# Allow dictionary keys to exist on multiple lines. For example:
#
#   x = {
#       ('this is the first element of a tuple',
#        'this is the second element of a tuple'):
#            value,
#   }
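This [style] section matches yapf's configuration format; assuming that is the intent, the file would typically be saved as .style.yapf at the project root and applied with yapf --in-place --recursive <dir>.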
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.backend import compile_engine
import gluoncv as gcv
model_name = 'MobileNet1.0'
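A hedged sketch of how this setup typically continues: load the GluonCV model, convert it to Relay, build, and run. The input shape, target, and opt level are assumptions.

# Sketch (assumed shapes/target): compile the GluonCV model with Relay.
model = gcv.model_zoo.get_model(model_name, pretrained=True)
shape_dict = {"data": (1, 3, 224, 224)}
mod, params = relay.frontend.from_mxnet(model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)
module = graph_runtime.GraphModule(lib["default"](tvm.cpu()))
module.set_input("data", np.random.uniform(size=shape_dict["data"]).astype("float32"))
module.run()
out = module.get_output(0).asnumpy()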
import numpy as np
import tvm
from tvm import relay
from tvm.autotvm.graph_tuner import DPTuner
from tvm.contrib import graph_runtime
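A hedged sketch of typical DPTuner usage, following the autotvm graph-tuner flow; the Relay module, log file names, and shapes here are assumptions:

# Sketch (assumed module/paths): graph-level layout tuning with DPTuner.
# "mod" is a Relay module and "kernel_tuned.log" an existing autotvm log.
target = "llvm"
target_ops = [relay.op.get("nn.conv2d")]
executor = DPTuner(mod["main"], {"data": (1, 3, 224, 224)},
                   "kernel_tuned.log", target_ops, target)
executor.benchmark_layout_transform(min_exec_num=1000)
executor.run()
executor.write_opt_sch2record_file("graph_opt.log")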
import torch
import torchvision
import numpy as np
import tvm
from tvm import auto_scheduler, te, topi
# The last layer in ResNet
H, W, CO, CI, KH, KW, strides, padding = 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
def conv2d_diff(N, H, W, CO, CI, KH, KW, stride, padding):
data = te.placeholder((N, CI, H, W), name="data")
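A hedged completion sketch (not necessarily the gist's original body), assuming the goal is to differentiate a conv2d with te.gradient and hand the resulting tensors to auto_scheduler:

# Sketch: possible completion of conv2d_diff using te autodiff.
def conv2d_diff_sketch(N, H, W, CO, CI, KH, KW, stride, padding):
    data = te.placeholder((N, CI, H, W), name="data")
    kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
    conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1)
    # Differentiate the convolution w.r.t. both inputs.
    ddata, dkernel = te.gradient(conv, [data, kernel])
    return [data, kernel, ddata, dkernel]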
import os
import numpy as np
import logging
import tvm
from tvm import auto_scheduler, te, topi
from tvm.topi.nn.util import get_pad_tuple
from tvm.auto_scheduler.compute_dag import ComputeDAG
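# Note: the long tuple unpackings below follow the style of schedule code
# generated by auto_scheduler (e.g. ComputeDAG's python-code printing); each
# line binds the spatial and reduction axes of one stage to local names.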
extracted_reduction_ax0, extracted_reduction_ax1, extracted_reduction_ax2, extracted_reduction_ax3, extracted_reduction_n0_n0_k1_shifted_shifted, extracted_reduction_n1_n1_k2_shifted_shifted, extracted_reduction_n2_n2_k3_shifted_shifted = tuple(extracted_reduction.op.axis) + tuple(extracted_reduction.op.reduce_axis)
pad_temp_data_grad_ax0, pad_temp_data_grad_ax1, pad_temp_data_grad_ax2, pad_temp_data_grad_ax3 = tuple(pad_temp_data_grad.op.axis) + tuple(pad_temp_data_grad.op.reduce_axis)
pad_temp_i0, pad_temp_i1, pad_temp_i2, pad_temp_i3 = tuple(pad_temp.op.axis) + tuple(pad_temp.op.reduce_axis)
compute_kernel_grad_ax0, compute_kernel_grad_ax1, compute_kernel_grad_ax2, compute_kernel_grad_ax3, compute_kernel_grad_n0_n0_k0_shifted_shifted, compute_kernel_grad_n1_n1_k2_shifted_shifted, compute_kernel_grad_n2_n2_k3_shifted_shifted = tuple(compute_kernel_grad.op.axis) + tuple(compute_kernel_grad.op.reduce_axis)
compute_kernel_grad_local, = s.cache_write([compute_kernel_grad], "local")
compute_kernel_grad_local_ax0
import logging
import numpy as np
import tvm
from tvm import relay, te, topi, transform, auto_scheduler
from tvm.contrib import graph_runtime
from tvm.relay.backend import compile_engine
# logging.basicConfig(level=logging.INFO)
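Given these imports, a hedged sketch of the usual next step, extracting auto_scheduler tasks from a Relay module; the module, params, and target are assumptions:

# Sketch (mod/params assumed to exist): extract tuning tasks from Relay.
target = tvm.target.Target("llvm")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for task, weight in zip(tasks, task_weights):
    print(task.compute_dag, "weight:", weight)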