ViTPose to Core ML
@mbotsu, created November 29, 2022

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "19627144",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"torch version: 1.13.0+cu117 False\n",
"torchvision version: 0.14.0+cu117\n",
"mmpose version: 0.24.0\n",
"cuda version: not available\n",
"compiler information: GCC 9.4\n"
]
}
],
"source": [
"import torch, torchvision\n",
"\n",
"print('torch version:', torch.__version__, torch.cuda.is_available())\n",
"print('torchvision version:', torchvision.__version__)\n",
"\n",
"# Check MMPose installation\n",
"import mmpose\n",
"\n",
"print('mmpose version:', mmpose.__version__)\n",
"\n",
"# Check mmcv installation\n",
"from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n",
"\n",
"print('cuda version:', get_compiling_cuda_version())\n",
"print('compiler information:', get_compiler_version())"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "49d76c5c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: coremltools in /home/o2.linux/.local/lib/python3.8/site-packages (6.1)\n",
"Requirement already satisfied: packaging in /home/o2.linux/.local/lib/python3.8/site-packages (from coremltools) (21.3)\n",
"Requirement already satisfied: sympy in /home/o2.linux/.local/lib/python3.8/site-packages (from coremltools) (1.11.1)\n",
"Requirement already satisfied: protobuf<=4.0.0,>=3.1.0 in /home/o2.linux/.local/lib/python3.8/site-packages (from coremltools) (3.20.3)\n",
"Requirement already satisfied: numpy>=1.14.5 in /home/o2.linux/.local/lib/python3.8/site-packages (from coremltools) (1.23.5)\n",
"Requirement already satisfied: tqdm in /home/o2.linux/.local/lib/python3.8/site-packages (from coremltools) (4.64.1)\n",
"Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /home/o2.linux/.local/lib/python3.8/site-packages (from packaging->coremltools) (3.0.9)\n",
"Requirement already satisfied: mpmath>=0.19 in /home/o2.linux/.local/lib/python3.8/site-packages (from sympy->coremltools) (1.2.1)\n"
]
}
],
"source": [
"!pip install coremltools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d95dfd1d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"apex is not installed\n",
"apex is not installed\n",
"apex is not installed\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Torch version 1.13.0+cu117 has not been tested with coremltools. You may run into unexpected errors. Torch 1.12.1 is the most recent version that has been tested.\n"
]
}
],
"source": [
"import argparse\n",
"import warnings\n",
"\n",
"import numpy as np\n",
"import torch\n",
"\n",
"from mmpose.apis import init_pose_model\n",
"\n",
"import coremltools as ct\n",
"from coremltools.models.neural_network import quantization_utils\n",
"import coremltools.proto.FeatureTypes_pb2 as ft"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "70ab4dea",
"metadata": {},
"outputs": [],
"source": [
"def _convert_batchnorm(module):\n",
" \"\"\"Convert the syncBNs into normal BN3ds.\"\"\"\n",
" module_output = module\n",
" if isinstance(module, torch.nn.SyncBatchNorm):\n",
" module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,\n",
" module.momentum, module.affine,\n",
" module.track_running_stats)\n",
" if module.affine:\n",
" module_output.weight.data = module.weight.data.clone().detach()\n",
" module_output.bias.data = module.bias.data.clone().detach()\n",
" # keep requires_grad unchanged\n",
" module_output.weight.requires_grad = module.weight.requires_grad\n",
" module_output.bias.requires_grad = module.bias.requires_grad\n",
" module_output.running_mean = module.running_mean\n",
" module_output.running_var = module.running_var\n",
" module_output.num_batches_tracked = module.num_batches_tracked\n",
" for name, child in module.named_children():\n",
" module_output.add_module(name, _convert_batchnorm(child))\n",
" del module\n",
" return module_output\n",
"\n",
"\n",
"def pytorch2onnx(model,\n",
" input_shape,\n",
" opset_version=11,\n",
" show=False,\n",
" output_file='tmp.onnx',\n",
" verify=False):\n",
" \"\"\"Convert pytorch model to onnx model.\n",
"\n",
" Args:\n",
" model (:obj:`nn.Module`): The pytorch model to be exported.\n",
" input_shape (tuple[int]): The input tensor shape of the model.\n",
" opset_version (int): Opset version of onnx used. Default: 11.\n",
" show (bool): Determines whether to print the onnx model architecture.\n",
" Default: False.\n",
" output_file (str): Output onnx model name. Default: 'tmp.onnx'.\n",
" verify (bool): Determines whether to verify the onnx model.\n",
" Default: False.\n",
" \"\"\"\n",
" model.cpu().eval()\n",
"\n",
" one_img = torch.randn(input_shape)\n",
"\n",
" # example_input = torch.rand(1, 3, 256, 192) \n",
" traced_model = torch.jit.trace(model, one_img)\n",
" out = traced_model(one_img)\n",
"\n",
" # Reference: https://github.com/zllrunning/face-parsing.PyTorch/issues/27\n",
" scale = 1.0 / (0.226 * 255.0)\n",
" red_bias = -0.485 / 0.226\n",
" green_bias = -0.456 / 0.226\n",
" blue_bias = -0.406 / 0.226\n",
"\n",
" coreml_model = ct.convert(\n",
" traced_model,\n",
" inputs=[ct.ImageType(name=\"input\",\n",
" shape=one_img.shape,\n",
" scale=scale,\n",
" color_layout=\"BGR\",\n",
" bias=[blue_bias, green_bias, red_bias])],\n",
" )\n",
"\n",
" coreml_model.save(output_file)\n",
"\n",
" model_fp16 = quantization_utils.quantize_weights(coreml_model, nbits=16)\n",
" output_file_fp16 = output_file.replace(\".mlmodel\", \"_fp16.mlmodel\")\n",
"\n",
" spec = model_fp16.get_spec()\n",
"\n",
" ct.utils.rename_feature(spec, 'var_748', 'output')\n",
" # ct.utils.rename_feature(spec, 'var_1024', 'output')\n",
"\n",
" # def update_multiarray_to_float(feature):\n",
" # if feature.type.HasField(\"multiArrayType\"):\n",
" # feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32\n",
"\n",
" # for feature in spec.description.output:\n",
" # update_multiarray_to_float(feature)\n",
"\n",
" ct.utils.save_spec(spec, output_file_fp16)"
]
},
{
"cell_type": "markdown",
"id": "88de9d41",
"metadata": {},
"source": [
"## Download vitpose-b.pth\n",
"https://1drv.ms/u/s!AimBgYV7JjTlgSMjp1_NrV3VRSmK?e=Q1uZKs"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a8686529",
"metadata": {},
"outputs": [],
"source": [
"import easydict\n",
"\n",
"args = easydict.EasyDict({\n",
" \"config\": 'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/ViTPose_base_coco_256x192.py', \n",
" \"checkpoint\": 'vitpose-b.pth',\n",
" \"shape\": [1, 3, 256, 192],\n",
" \"opset_version\": 11,\n",
" \"show\": False,\n",
" \"output_file\": \"simplebaseline.mlmodel\",\n",
" \"verify\": False\n",
"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0501c7c",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_3109/3762317690.py:13: UserWarning: \u001b[107m\u001b[1m\u001b[31mDeprecationWarning: This tool will be deprecated in future. \u001b[34mWelcome to use the unified model deployment toolbox MMDeploy: https://github.com/open-mmlab/mmdeploy\u001b[0m\n",
" warnings.warn(msg)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Use load_from_local loader\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/o2/work/posetest/limawork/mmcv/mmcv/cnn/bricks/wrappers.py:88: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
" if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):\n",
"Converting PyTorch Frontend ==> MIL Ops: 100%|████████████████████████████████████████████████████████████████▊| 452/453 [00:02<00:00, 170.42 ops/s]\n",
"Running MIL Common passes: 5%|████ | 2/39 [00:00<00:02, 13.45 passes/s]/home/o2.linux/.local/lib/python3.8/site-packages/coremltools/converters/mil/mil/passes/name_sanitization_utils.py:135: UserWarning: Output, '748', of the source model, has been renamed to 'var_748' in the Core ML model.\n",
" warnings.warn(msg.format(var.name, new_name))\n",
"Running MIL Common passes: 100%|███████████████████████████████████████████████████████████████████████████████| 39/39 [00:06<00:00, 6.45 passes/s]\n",
"Running MIL Clean up passes: 100%|█████████████████████████████████████████████████████████████████████████████| 11/11 [00:02<00:00, 4.00 passes/s]\n",
"Translating MIL ==> NeuralNetwork Ops: 100%|████████████████████████████████████████████████████████████████████| 748/748 [01:21<00:00, 9.13 ops/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Quantizing using linear quantization\n",
"Quantizing layer x.1 of type convolution\n",
"Quantizing layer x.5_scale of type scale\n",
"Quantizing layer qkv.1 of type innerProduct\n",
"Quantizing layer attn.1 of type batchedMatmul\n",
"Quantizing layer 99 of type batchedMatmul\n",
"Quantizing layer input.7 of type innerProduct\n",
"Quantizing layer input.11_scale of type scale\n",
"Quantizing layer input.13 of type innerProduct\n",
"Quantizing layer input.17 of type innerProduct\n",
"Quantizing layer x.7_scale of type scale\n",
"Quantizing layer qkv.5 of type innerProduct\n",
"Quantizing layer attn.5 of type batchedMatmul\n",
"Quantizing layer 151 of type batchedMatmul\n",
"Quantizing layer input.25 of type innerProduct\n",
"Quantizing layer input.29_scale of type scale\n",
"Quantizing layer input.31 of type innerProduct\n",
"Quantizing layer input.35 of type innerProduct\n",
"Quantizing layer x.9_scale of type scale\n",
"Quantizing layer qkv.9 of type innerProduct\n",
"Quantizing layer attn.9 of type batchedMatmul\n",
"Quantizing layer 203 of type batchedMatmul\n",
"Quantizing layer input.43 of type innerProduct\n",
"Quantizing layer input.47_scale of type scale\n",
"Quantizing layer input.49 of type innerProduct\n",
"Quantizing layer input.53 of type innerProduct\n",
"Quantizing layer x.11_scale of type scale\n",
"Quantizing layer qkv.13 of type innerProduct\n",
"Quantizing layer attn.13 of type batchedMatmul\n",
"Quantizing layer 255 of type batchedMatmul\n",
"Quantizing layer input.61 of type innerProduct\n"
]
}
],
"source": [
"assert args.opset_version == 11, 'MMPose only supports opset 11 now'\n",
"\n",
"# Following strings of text style are from colorama package\n",
"bright_style, reset_style = '\\x1b[1m', '\\x1b[0m'\n",
"red_text, blue_text = '\\x1b[31m', '\\x1b[34m'\n",
"white_background = '\\x1b[107m'\n",
"\n",
"msg = white_background + bright_style + red_text\n",
"msg += 'DeprecationWarning: This tool will be deprecated in future. '\n",
"msg += blue_text + 'Welcome to use the unified model deployment toolbox '\n",
"msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'\n",
"msg += reset_style\n",
"warnings.warn(msg)\n",
"\n",
"model = init_pose_model(args.config, args.checkpoint, device='cpu')\n",
"model = _convert_batchnorm(model)\n",
"\n",
"# onnx.export does not support kwargs\n",
"if hasattr(model, 'forward_dummy'):\n",
" model.forward = model.forward_dummy\n",
"else:\n",
" raise NotImplementedError(\n",
" 'Please implement the forward method for exporting.')\n",
"\n",
"# convert model to onnx file\n",
"pytorch2onnx(\n",
" model,\n",
" args.shape,\n",
" opset_version=args.opset_version,\n",
" show=args.show,\n",
" output_file=args.output_file,\n",
" verify=args.verify)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1117ba85",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

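Verifying the exported model (optional)

The notebook accepts a verify flag but never acts on it. Below is a minimal sketch of what a verification step could look like; it assumes macOS (Core ML prediction only runs there), the output feature renamed to 'output' as in the notebook, and the simplebaseline_fp16.mlmodel file the notebook writes.

import coremltools as ct
import numpy as np
from PIL import Image

# Random 192x256 test image; the ImageType scale/bias preprocessing is
# applied by Core ML itself, so a raw uint8 image is passed in.
img = Image.fromarray(np.random.randint(0, 255, (256, 192, 3), dtype=np.uint8))

mlmodel = ct.models.MLModel("simplebaseline_fp16.mlmodel")
heatmaps = np.asarray(mlmodel.predict({"input": img})["output"])

# ViTPose-B (COCO, 256x192) emits 17 keypoint heatmaps at 1/4 resolution.
print(heatmaps.shape)  # expected: (1, 17, 64, 48)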
Evaluation environment

  • Intel Mac
  • M1 Mac (Lima environment)

Lima Setup

cat << 'EOF' > focal-amd64.yaml
arch: "x86_64"
images:
- location: "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
  arch: "x86_64"
cpus: 4
memory: 20GiB
mounts:
- location: "~"
  writable: true
EOF

limactl validate focal-amd64.yaml
limactl start --tty=false focal-amd64.yaml
limactl shell focal-amd64

Lima Install

sudo apt install python3.8-venv
sudo apt install python3-pip
python3 -m venv env
source env/bin/activate
sudo apt install gcc cmake
sudo apt install libgl1-mesa-dev ninja-build g++

ViTPose Setup

git clone https://github.com/open-mmlab/mmcv.git
cd mmcv
git checkout v1.3.9
MMCV_WITH_OPS=1 pip install -e .
cd ..
git clone https://github.com/ViTAE-Transformer/ViTPose.git
cd ViTPose
pip install -v -e .

pip install jupyter
pip install easydict
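
Run the Notebook

From inside the ViTPose checkout (so the relative config path resolves), start Jupyter; add --ip=0.0.0.0 if connecting from outside the Lima VM:

jupyter notebook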