@mbotsu
Created March 27, 2020 02:24
lightweight_convert.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "lightweight_convert.ipynb",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyNgmkUjMbRRt0FGTv5mB7oF",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/mbotsu/ad9b97558a163304725244b5ee898588/lightweight_convert.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "xgqTihdDGPj-",
"colab_type": "code",
"colab": {}
},
"source": [
"!git clone https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch.git"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "5bBK_LjdGfZX",
"colab_type": "code",
"colab": {}
},
"source": [
"!wget https://download.01.org/opencv/openvino_training_extensions/models/human_pose_estimation/checkpoint_iter_370000.pth"
],
"execution_count": 0,
"outputs": []
},
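{
"cell_type": "markdown",
"metadata": {
"id": "check_downloads_md",
"colab_type": "text"
},
"source": [
"Optional check, not part of the original gist: list the cloned repository and the downloaded checkpoint to confirm both steps succeeded before converting."
]
},
{
"cell_type": "code",
"metadata": {
"id": "check_downloads_code",
"colab_type": "code",
"colab": {}
},
"source": [
"# Optional sanity check: confirm the repo was cloned and the checkpoint was downloaded\n",
"!ls lightweight-human-pose-estimation.pytorch\n",
"!ls -lh checkpoint_iter_370000.pth"
],
"execution_count": 0,
"outputs": []
},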
{
"cell_type": "code",
"metadata": {
"id": "tisCTenfHp1a",
"colab_type": "code",
"colab": {}
},
"source": [
"import torch\n",
"import sys\n",
"sys.path.append(\"./lightweight-human-pose-estimation.pytorch\")\n",
"from models.with_mobilenet import PoseEstimationWithMobileNet\n",
"from modules.load_state import load_state\n",
"\n",
"\n",
"def convert_to_onnx(net, output_name):\n",
" input = torch.randn(1, 3, 360, 360)\n",
" input_names = ['data']\n",
" output_names = ['stage_0_output_1_heatmaps', 'stage_0_output_0_pafs',\n",
" 'stage_1_output_1_heatmaps', 'stage_1_output_0_pafs']\n",
"\n",
" torch.onnx.export(net, input, output_name, verbose=True, input_names=input_names, output_names=output_names)\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" net = PoseEstimationWithMobileNet()\n",
" checkpoint = torch.load(\"./checkpoint_iter_370000.pth\")\n",
" load_state(net, checkpoint)\n",
"\n",
" convert_to_onnx(net, \"small_model.onnx\")"
],
"execution_count": 0,
"outputs": []
},
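{
"cell_type": "markdown",
"metadata": {
"id": "onnx_check_md",
"colab_type": "text"
},
"source": [
"Optional verification, not part of the original gist: a minimal sketch that loads `small_model.onnx` with the `onnx` package, runs the structural checker, and prints the declared inputs and outputs. It assumes `onnx` may not be installed yet, so it installs it first."
]
},
{
"cell_type": "code",
"metadata": {
"id": "onnx_check_code",
"colab_type": "code",
"colab": {}
},
"source": [
"# Optional check of the exported ONNX graph (assumes onnx is not installed yet)\n",
"!pip install onnx\n",
"\n",
"import onnx\n",
"\n",
"# Validate the exported graph structure\n",
"onnx_model = onnx.load(\"small_model.onnx\")\n",
"onnx.checker.check_model(onnx_model)\n",
"\n",
"# The names should match those passed to torch.onnx.export above\n",
"print([i.name for i in onnx_model.graph.input])\n",
"print([o.name for o in onnx_model.graph.output])"
],
"execution_count": 0,
"outputs": []
},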
{
"cell_type": "code",
"metadata": {
"id": "pnYf2q_-qQ-H",
"colab_type": "code",
"colab": {}
},
"source": [
"!pip install onnx_coreml"
],
"execution_count": 0,
"outputs": []
},
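{
"cell_type": "markdown",
"metadata": {
"id": "version_check_md",
"colab_type": "text"
},
"source": [
"Optional, not part of the original gist: record which `onnx-coreml` and `coremltools` versions were installed, since the converter API changed between releases."
]
},
{
"cell_type": "code",
"metadata": {
"id": "version_check_code",
"colab_type": "code",
"colab": {}
},
"source": [
"# Record the installed converter versions for reproducibility\n",
"!pip show onnx-coreml coremltools"
],
"execution_count": 0,
"outputs": []
},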
{
"cell_type": "code",
"metadata": {
"id": "DuoYriU7riHX",
"colab_type": "code",
"colab": {}
},
"source": [
"# Reference: PyTorch to CoreML Cheat Sheet\n",
"# https://medium.com/@kuluum/pytroch-to-coreml-cheatsheet-fda57979b3c6\n",
"from onnx_coreml import convert\n",
"import coremltools\n",
"import coremltools.proto.FeatureTypes_pb2 as ft \n",
"\n",
"scale = 1/256.\n",
"args = dict(is_bgr=True, image_scale = scale)\n",
"\n",
"coreml_model = convert(model='small_model.onnx', image_input_names=['data'], preprocessing_args=args)\n",
"coreml_model.save(\"small_model.mlmodel\")\n",
"\n",
"spec = coreml_model.get_spec()\n",
"input = spec.description.input[0]\n",
"input.type.imageType.colorSpace = ft.ImageFeatureType.BGR\n",
"input.type.imageType.height = 360 \n",
"input.type.imageType.width = 360\n",
"coremltools.utils.save_spec(spec, \"small_model2.mlmodel\")"
],
"execution_count": 0,
"outputs": []
},
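{
"cell_type": "markdown",
"metadata": {
"id": "coreml_check_md",
"colab_type": "text"
},
"source": [
"Optional inspection, not part of the original gist: reload the patched spec with `coremltools.utils.load_spec` and print its description to confirm the `data` input is now a 360x360 BGR image. Running predictions against the .mlmodel needs macOS, so only the spec is inspected here."
]
},
{
"cell_type": "code",
"metadata": {
"id": "coreml_check_code",
"colab_type": "code",
"colab": {}
},
"source": [
"import coremltools\n",
"\n",
"# Reload the patched model spec and print its inputs/outputs;\n",
"# the input should be declared as a 360x360 BGR image named 'data'\n",
"spec2 = coremltools.utils.load_spec(\"small_model2.mlmodel\")\n",
"print(spec2.description)"
],
"execution_count": 0,
"outputs": []
},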
{
"cell_type": "code",
"metadata": {
"id": "tX-Eed70rn_o",
"colab_type": "code",
"colab": {}
},
"source": [
"from google.colab import files\n",
"files.download('small_model2.mlmodel')"
],
"execution_count": 0,
"outputs": []
}
]
}