Created
March 25, 2022 11:50
-
-
Save Ahwar/80600ae719e317dc73110f2c9a119492 to your computer and use it in GitHub Desktop.
Run ONNX model inference in Python using ONNX Runtime (onnxruntime).
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import onnxruntime as nxrun | |
import numpy as np | |
import time | |
def mills():
    """Return the current wall-clock time in whole milliseconds since the Unix epoch.

    Used below to time a single inference pass. Note: wall-clock based, so it
    is subject to system clock adjustments; fine for rough one-shot timing.
    """
    # time.time() is seconds as a float; round before truncating to int.
    return int(round(time.time() * 1000))
## Start an inference session for the exported ONNX model.
# NOTE(review): replace this placeholder path with the real model file.
sess = nxrun.InferenceSession("path/to/model_file.onnx")

## Query input/output metadata from the loaded graph.
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name
input_shape = sess.get_inputs()[0].shape
output_shape = sess.get_outputs()[0].shape

# Build a dummy input that matches the model's declared input shape instead of
# hard-coding [1, 416, 416, 3]. Dynamic/symbolic dimensions (reported by
# onnxruntime as None or as strings like "batch") are replaced with 1.
dummy_shape = [dim if isinstance(dim, int) else 1 for dim in input_shape]
dummy_input = np.ones(dummy_shape, dtype=np.float32)

# Start the clock only after the dummy tensor is allocated, so the reported
# number is the inference call alone.
start_time = mills()
## Run the model with ONNX Runtime; output_names=None returns all outputs.
result = sess.run(None, {input_name: dummy_input})
print("model single inference in milliSeconds on onnxruntime: ", mills() - start_time)
print("Output", result)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment