Skip to content

Instantly share code, notes, and snippets.

@yzhliu
Created January 8, 2020 03:07
Show Gist options
  • Save yzhliu/1142a794dc8d8960ff94506c6ee9b48d to your computer and use it in GitHub Desktop.
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from tvm.contrib.util import tempdir
from tvm import autotvm
from tvm import relay
import tvm.relay.testing
#import mxnet
#from mxnet.gluon.model_zoo.vision import get_model
# Benchmark an fp16 ResNet-18 compiled with TVM/Relay on an ARM CPU target.
batch_size = 1
# BUG FIX: the model below is converted with a 512x512 input shape, but the
# random input tensor was built from a 224x224 shape, which would fail at
# runtime with a shape mismatch. Use a single shape definition for both.
image_shape = (3, 512, 512)
data_shape = (batch_size,) + image_shape
dtype = "float16"
#mod, params = relay.testing.resnet.get_workload(
# num_layers=50, batch_size=batch_size, image_shape=image_shape, dtype=dtype)
from gluoncv import model_zoo, data, utils

# BUG FIX: get_model lives in gluoncv.model_zoo; the bare name `get_model`
# was never imported and raised a NameError.
block = model_zoo.get_model('resnet18_v1', pretrained=True)
# Cast the Gluon model's parameters to fp16 before conversion to Relay.
block.cast(dtype)
mod, params = relay.frontend.from_mxnet(block, shape={'data': data_shape}, dtype=dtype)
net = mod["main"]
mod = relay.Module.from_expr(net)

opt_level = 3
# aarch64 target with fp16/dotprod/crypto extensions enabled.
target = 'llvm -device=arm_cpu -target=aarch64-linux-gnu -mattr=+v8.2a,+fullfp16,+fp-armv8,+dotprod,+crc,+crypto,+neon'

# BUG FIX: the build call must be nested inside both context managers so the
# autotvm graph-tuning log and the opt_level actually apply; the pasted
# source had lost this indentation (SyntaxError as written).
with autotvm.apply_graph_best('log/xxx.log'):
    with relay.build_config(opt_level=opt_level):
        graph, lib, params = relay.build_module.build(
            mod, target, params=params)

ctx = tvm.cpu()
module = graph_runtime.create(graph, lib, ctx)
# Random fp16 input matching the compiled graph's expected 'data' shape.
data_tvm = tvm.nd.array((np.random.uniform(size=data_shape)).astype(dtype))
module.set_input('data', data_tvm)
module.set_input(**params)
module.run()

# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=100, repeat=1)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
      (np.mean(prof_res), np.std(prof_res)))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment