Created
September 14, 2018 21:32
-
-
Save mkolod/4b75080a470afb09ff588a334aad2a40 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import mxnet as mx | |
from mxnet.gluon.model_zoo import vision | |
import os | |
import time | |
# Input geometry for one ImageNet-style image: (batch, channels, height, width).
batch_shape = (1, 3, 224, 224)

# Pull a pretrained ResNet-18 v2 from the Gluon model zoo, compile it to a
# static symbolic graph (hybridize + one forward pass to trigger tracing),
# and serialize graph + weights to disk.
net = vision.resnet18_v2(pretrained=True)
net.hybridize()
net.forward(mx.nd.zeros(batch_shape))
net.export('resnet18_v2')

# Reload the exported checkpoint (epoch 0) as a raw symbol plus parameter dicts.
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet18_v2', 0)
# Create sample input
# NOTE(review): `input` shadows the builtin of the same name; it is read by the
# warmup/timing loops below, so renaming it must be done file-wide, not here.
input = mx.nd.zeros(batch_shape)
# Execute with MXNet
# TensorRT is opt-in via this environment variable; disable it so this
# executor runs the plain MXNet graph as the baseline.
os.environ['MXNET_USE_TENSORRT'] = '0'
# Bind the symbol for inference on GPU 0 (no gradients) and load the
# checkpoint parameters into the bound executor.
executor = sym.simple_bind(ctx=mx.gpu(0), data=batch_shape, grad_req='null', force_rebind=True)
executor.copy_params_from(arg_params, aux_params)
# Warmup: run a few forward passes so one-time costs (kernel selection,
# memory allocation) are excluded from the timed run.
print('Warming up MXNet')
for _ in range(10):
    y_gen = executor.forward(is_train=False, data=input)
    y_gen[0].wait_to_read()

# Timed run over 10,000 forward passes.
# Fix: the original mixed clocks — it started with time.process_time(),
# assigned an unused `end = time.time()`, then printed a process_time delta.
# process_time() excludes time the process spends blocked (e.g. waiting on
# the GPU in wait_to_read()), so it can grossly understate wall latency.
# Use a single monotonic wall clock instead.
print('Starting MXNet timed run')
start = time.perf_counter()
for _ in range(10000):
    y_gen = executor.forward(is_train=False, data=input)
    y_gen[0].wait_to_read()  # block until the output is actually computed
print(time.perf_counter() - start)
# Execute with TensorRT
print('Building TensorRT engine')
# Re-enable the TensorRT integration for the second executor.
os.environ['MXNET_USE_TENSORRT'] = '1'
# tensorrt_bind takes a single parameter dict, so fold the auxiliary states
# into arg_params (in-place; arg_params is not reused after this point).
arg_params.update(aux_params)
# Move every parameter onto the GPU the engine will be bound to.
# Fix: dict comprehension instead of dict([...]) (flake8-comprehensions C404).
all_params = {k: v.as_in_context(mx.gpu(0)) for k, v in arg_params.items()}
executor = mx.contrib.tensorrt.tensorrt_bind(sym, ctx=mx.gpu(0), all_params=all_params,
                                             data=batch_shape, grad_req='null', force_rebind=True)
# Warmup: first passes build/initialize the TensorRT engine; keep that cost
# out of the timed run.
print('Warming up TensorRT')
for _ in range(10):
    y_gen = executor.forward(is_train=False, data=input)
    y_gen[0].wait_to_read()

# Timed run over 10,000 forward passes.
# Fix: same clock bug as the MXNet run — process_time() was mixed with an
# unused `end = time.time()`, and process_time() does not count time spent
# blocked waiting on the GPU. Use one monotonic wall clock throughout.
print('Starting TensorRT timed run')
start = time.perf_counter()
for _ in range(10000):
    y_gen = executor.forward(is_train=False, data=input)
    y_gen[0].wait_to_read()  # block until the output is actually computed
print(time.perf_counter() - start)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Taken from here and tested.
Output on a 6-core Skylake CPU (Intel Core i7-7800X CPU @ 3.50GHz), with an NVIDIA Titan V:
The above timing is in seconds. The test was done using a Docker image
and run using nvidia-docker. To install nvidia-docker, see the NVIDIA Container Toolkit (nvidia-docker) project documentation for instructions.
MXNet-TensorRT can also be installed using pip packages made for CUDA 9.0 and 9.2:
or
For the pip installation, make sure you already installed TensorRT and OpenBLAS. For OpenBLAS, on Ubuntu 16.04, do
Similar things can be done on other versions of Ubuntu, Debian, RHEL, CentOS, etc.
For TensorRT, you can download it from the NVIDIA Developer site and follow the installation instructions provided there.
As you can see, the Docker image approach is the easiest.