Can the Lightweight-OpenPose PyTorch model be used with SwiftOpenPose?

Introduction

Can the Lightweight-OpenPose PyTorch model be used with SwiftOpenPose?
This page describes the model conversion script and the changes to SwiftOpenPose needed to run the converted model.
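For context, the CoreML conversion below starts from an ONNX file (small_model.onnx). A minimal sketch of producing it, assuming the export conventions of the Lightweight-OpenPose repository (its scripts/convert_to_onnx.py uses the same 'data' input and stage output names); the checkpoint filename is illustrative:

import torch
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state

net = PoseEstimationWithMobileNet()
checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu')  # illustrative checkpoint name
load_state(net, checkpoint)
net.eval()

dummy_input = torch.randn(1, 3, 360, 360)  # NCHW, matching the 360x360 input used below
torch.onnx.export(net, dummy_input, 'small_model.onnx',
                  input_names=['data'],
                  output_names=['stage_0_output_1_heatmaps', 'stage_0_output_0_pafs',
                                'stage_1_output_1_heatmaps', 'stage_1_output_0_pafs'])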

Try Lightweight-OpenPose

Lightweight-OpenPose 564x360 Result

lightweightOpenPose_564x360_result.jpg

Lightweight-OpenPose 360x360 Result

If the image is resized to a square without preserving its aspect ratio, inference quality suffers.
Keep this result in mind when looking at the next ones.

lightweightOpenPose_360x360_result
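The distortion above can be avoided by scaling to the longer side and padding, instead of stretching. A minimal sketch (the function name and pad value are illustrative, not from this gist):

import cv2
import numpy as np

def resize_keep_aspect(img, target=360, pad_value=(128, 128, 128)):
    # Scale the longer side to `target`, then pad the shorter side to keep the input square.
    h, w = img.shape[:2]
    scale = target / max(h, w)
    resized = cv2.resize(img, (int(w * scale), int(h * scale)))
    canvas = np.full((target, target, 3), pad_value, dtype=img.dtype)
    canvas[:resized.shape[0], :resized.shape[1]] = resized
    return canvas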

CoreML Result

Yeah! The model converted to a CoreML model without major problems.

Lightweight-OpenPose to CoreML-Model, 360x360 Result

lightweightOpenPose_to_coreml_360x360_result

SwiftOpenPose Result

I confirmed that the converted model works with SwiftOpenPose.

SwiftOpenPose 360x360 Result

SwiftOpenpose_PAF_360x360_result

Create CoreML-Model

Test

Edited Lightweight-OpenPose demo.py:

import PIL.Image
import numpy as np
import cv2
import coremltools

def load_image(path, resize_to=None):
    # resize_to: (Width, Height)
    img = PIL.Image.open(path)
    if resize_to is not None:
        img = img.resize(resize_to, PIL.Image.ANTIALIAS)
    img_np = np.array(img).astype(np.float32)
    return img_np, img

_, img = load_image('./hadou.jpg', resize_to=(360, 360))

model = coremltools.models.MLModel('./small_model2.mlmodel')
out_dict = model.predict({'data': img})

stage2_heatmaps = out_dict['stage_1_output_1_heatmaps'].squeeze()
stage2_pafs = out_dict['stage_1_output_0_pafs'].squeeze()

heatmaps = np.transpose(stage2_heatmaps, (1, 2, 0))
pafs = np.transpose(stage2_pafs, (1, 2, 0))

upsample_ratio = 4  # same value as the original demo.py uses to upsample the stride-8 output
heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
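From here the original demo.py turns the maps into poses. A sketch of that step, assuming the extract_keypoints and group_keypoints helpers from the Lightweight-OpenPose repository (modules/keypoints.py); exact signatures may vary by revision:

from modules.keypoints import extract_keypoints, group_keypoints

num_keypoints = 18  # COCO-style keypoints used by Lightweight-OpenPose
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints):
    total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                             all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)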

CoreML-Model Download

References

# Reference: PyTorch to CoreML Cheat Sheet
# https://medium.com/@kuluum/pytroch-to-coreml-cheatsheet-fda57979b3c6
from onnx_coreml import convert
import coremltools
import coremltools.proto.FeatureTypes_pb2 as ft

# Convert the ONNX model, treating 'data' as a BGR image with pixels scaled by 1/256 (roughly [0, 1)).
scale = 1 / 256.
args = dict(is_bgr=True, image_scale=scale)
coreml_model = convert(model='small_model.onnx', image_input_names=['data'], preprocessing_args=args)
coreml_model.save("small_model.mlmodel")

# Patch the spec so the input is a fixed-size 360x360 BGR image.
spec = coreml_model.get_spec()
input_desc = spec.description.input[0]
input_desc.type.imageType.colorSpace = ft.ImageFeatureType.BGR
input_desc.type.imageType.height = 360
input_desc.type.imageType.width = 360
coremltools.utils.save_spec(spec, "small_model2.mlmodel")
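A quick sanity check on the patched model, using the same coremltools 3.x-era API this gist already relies on:

import coremltools

mlmodel = coremltools.models.MLModel('small_model2.mlmodel')
# Expect an image input 'data' (360x360, BGR) and the two stage-1 MultiArray outputs.
print(mlmodel.get_spec().description)

The SwiftOpenPose patch below then reads those two named outputs.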
diff --git a/SwiftOpenPose/Sources/ViewController.swift b/SwiftOpenPose/Sources/ViewController.swift
index 3c6b0d1..0dabbbe 100644
--- a/SwiftOpenPose/Sources/ViewController.swift
+++ b/SwiftOpenPose/Sources/ViewController.swift
@@ -3,12 +3,13 @@ import CoreML
 import Vision
 import Upsurge
+@available(iOS 13.0, *)
 class ViewController: UIViewController {
     // let model = coco_pose_368()
-    let model = MobileOpenPose()
-    let ImageWidth = 368
-    let ImageHeight = 368
+    let model = LightWeightOpenPose()
+    let ImageWidth = 360
+    let ImageHeight = 360
     @IBOutlet weak var imageView: UIImageView!
@@ -66,12 +67,17 @@ class ViewController: UIViewController {
     func handleClassification(request: VNRequest, error: Error?) {
         guard let observations = request.results as? [VNCoreMLFeatureValueObservation] else { fatalError() }
-        let mlarray = observations[0].featureValue.multiArrayValue!
-        let length = mlarray.count
-        let doublePtr = mlarray.dataPointer.bindMemory(to: Double.self, capacity: length)
-        let doubleBuffer = UnsafeBufferPointer(start: doublePtr, count: length)
-        let mm = Array(doubleBuffer)
-
+        var pafs: [Float32] = []
+        var heatmaps: [Float32] = []
+        for ob in observations {
+            if ob.featureName == "stage_1_output_0_pafs" {
+                pafs = mlValToArray(ob.featureValue)
+            }
+            if ob.featureName == "stage_1_output_1_heatmaps" {
+                heatmaps = mlValToArray(ob.featureValue)
+            }
+        }
+        let mm = Array<Double>(heatmaps.compactMap { Double($0) } + pafs.compactMap { Double($0) })
         drawLine(mm)
     }
@@ -87,6 +93,14 @@ class ViewController: UIViewController {
         }
     }
+    func mlValToArray(_ featureValue: MLFeatureValue) -> [Float32] {
+        let mlarray = featureValue.multiArrayValue!
+        let length = mlarray.count
+        let floatPtr = mlarray.dataPointer.bindMemory(to: Float32.self, capacity: length)
+        let floatBuffer = UnsafeBufferPointer(start: floatPtr, count: length)
+        return Array<Float32>(floatBuffer)
+    }
+
     func drawLine(_ mm: Array<Double>) {
         let com = PoseEstimator(ImageWidth, ImageHeight)
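The concatenation order in the patch matters: SwiftOpenPose's PoseEstimator consumes one flat buffer laid out heatmaps first, then PAFs, which is what `heatmaps + pafs` reproduces. A Python sketch of the assumed layout (channel counts follow the standard OpenPose/Lightweight-OpenPose convention; the shapes are illustrative for a 360x360 input with an output stride of 8):

import numpy as np

# 19 heatmaps (18 keypoints + background) and 38 PAF channels (19 limbs x 2).
# Spatial size 45x45 = 360 / 8; zeros here, purely to show the flattening order.
heatmaps = np.zeros((19, 45, 45), dtype=np.float32)
pafs = np.zeros((38, 45, 45), dtype=np.float32)
mm = np.concatenate([heatmaps.ravel(), pafs.ravel()]).astype(np.float64)  # heatmaps first, then PAFs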

dhgokul commented May 7, 2020

@mbotsu How fast does it run on a mobile device?


mbotsu commented May 8, 2020

@dhgokul I haven't measured the speed; I only checked that it runs.
Lightweight-OpenPose uses a MobileNet v1 backbone; it is slower than MobileNet v2, but accurate.
