Skip to content

Instantly share code, notes, and snippets.

@higumachan
Created August 19, 2019 07:27
Show Gist options
  • Save higumachan/ce81f499349e2665059fe3931f7226c1 to your computer and use it in GitHub Desktop.
from pathlib import Path
import tvm
from chainer import Variable
from chainer.links import ResNet50Layers
from tvm import relay
from tvm.contrib import graph_runtime
import numpy as np
import torch
import torchvision
from benchmarker import Benchmarker
from torchvision.models import inception, resnet50
from hirundo.onnx import get_network
# Benchmark ResNet-50 forward-pass latency across frameworks:
# PyTorch, Chainer (NumPy / iDeep / ChainerX backends) and TVM
# (pre-tuned artifacts, freshly built AVX-512, freshly built plain LLVM).
loop = 1                        # benchmark iterations per entry
data_shape = (1, 3, 224, 224)   # NCHW: one 224x224 RGB image

with Benchmarker(loop, width=20, cycle=3, extra=1) as bench:

    @bench('pytorch')
    def _(bm):
        # NOTE(review): requires_grad=True and default (train-mode) weights
        # mean this times autograd-enabled inference; wrap the call in
        # torch.no_grad() and call resnet.eval() for pure-inference numbers.
        x = torch.randn(*data_shape, requires_grad=True)
        resnet = resnet50()
        with bm:
            resnet(x)

    @bench('chainer')
    def _(bm):
        # pretrained_model=None: random weights, no download required.
        resnet = ResNet50Layers(None)
        x = np.random.randn(*data_shape).astype(np.float32)
        with bm:
            resnet.forward(x)

    @bench('chainer ideep')
    def _(bm):
        resnet = ResNet50Layers(None)
        resnet.to_intel64()  # move weights to the iDeep (MKL-DNN) backend
        x = Variable(np.random.randn(*data_shape).astype(np.float32))
        x.to_intel64()
        with bm:
            resnet.forward(x)

    @bench('chainerx')
    def _(bm):
        resnet = ResNet50Layers(None)
        resnet.to_chx()  # move weights to the ChainerX backend
        x = Variable(np.random.randn(*data_shape).astype(np.float32))
        x.to_chx()
        with bm:
            resnet.forward(x)

    @bench("tvm tuned")
    def _(bm):
        # Load a previously compiled + auto-tuned deployment module from disk.
        temp = Path("resnet50")
        # Path.read_text()/read_bytes() close their handles; the original
        # bare open(...).read() calls leaked the file objects.
        loaded_json = (temp / "deploy_graph.json").read_text()
        loaded_lib = tvm.module.load(str(temp / "deploy_lib.tar"))
        loaded_params = bytearray((temp / "deploy_param.params").read_bytes())
        input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
        module = graph_runtime.create(loaded_json, loaded_lib, tvm.cpu())
        module.load_params(loaded_params)
        with bm:
            module.run(**{"gpu_0/data_0": input_data})
        out_deploy = module.get_output(0).asnumpy()

    @bench("tvm avx")
    def _(bm):
        # Build the ONNX model for an AVX-512 CPU target, then time one run.
        target = "llvm -mcpu=skylake-avx512"
        net, params = get_network({
            "onnx_file": "test_assets/resnet50/model.onnx",
            "input_name": "gpu_0/data_0"
        }, data_shape, "float32")
        input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build_module.build(net, target=target, params=params)
        module = graph_runtime.create(graph, lib, tvm.cpu())
        # params are already baked in by relay.build; load_params not needed.
        with bm:
            module.run(**{"gpu_0/data_0": input_data})
        out_deploy = module.get_output(0).asnumpy()

    @bench("tvm llvm")
    def _(bm):
        # Same as "tvm avx" but with a generic LLVM target (no AVX-512).
        target = "llvm"
        net, params = get_network({
            "onnx_file": "test_assets/resnet50/model.onnx",
            "input_name": "gpu_0/data_0"
        }, data_shape, "float32")
        input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build_module.build(net, target=target, params=params)
        module = graph_runtime.create(graph, lib, tvm.cpu())
        # params are already baked in by relay.build; load_params not needed.
        with bm:
            module.run(**{"gpu_0/data_0": input_data})
        out_deploy = module.get_output(0).asnumpy()

if __name__ == '__main__':
    # Benchmarks execute at import time via the Benchmarker context manager.
    pass
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment