@aurotripathy
Created April 18, 2023 20:32
"""
The model is from https://github.com/derronqi/yolov7-face
Here, we download yolov7s-face.pt and convert it.
Output is model.onnx
"""
import torch
from models.experimental import attempt_load
from pudb import set_trace
from utils.torch_utils import select_device
batch_size = 1
x = torch.randn(batch_size, 3, 640, 640, requires_grad=True)
device = select_device('cpu')
weights_file = './checkpoint/yolov7s-face.pt'
# set_trace()
model = attempt_load(weights_file, map_location=device) # load FP32 model
print('Model loaded')
torch_out = model(x)  # sanity check: run one forward pass before exporting
# Export the model
torch.onnx.export(model,                     # model being run
                  x,                         # model input (or a tuple for multiple inputs)
                  "model.onnx",              # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=12,          # the ONNX opset version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},     # variable-length axes
                                'output': {0: 'batch_size'}})
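
# ---------------------------------------------------------------------------
# Optional sanity check -- a minimal sketch, not part of the original script.
# It assumes the `onnx` and `onnxruntime` packages are installed.
# It validates the exported graph and runs one dummy inference through
# onnxruntime so the output shapes can be compared against `torch_out` above.
# ---------------------------------------------------------------------------
import numpy as np
import onnx
import onnxruntime as ort

onnx_model = onnx.load("model.onnx")
onnx.checker.check_model(onnx_model)  # raises if the exported graph is malformed

session = ort.InferenceSession("model.onnx", providers=['CPUExecutionProvider'])
dummy = np.random.randn(batch_size, 3, 640, 640).astype(np.float32)
ort_outputs = session.run(None, {'input': dummy})
print('ONNX output shapes:', [o.shape for o in ort_outputs])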