@JustinShenk
Created December 9, 2018 18:54
Neural style transfer with OpenVINO and webcam
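
A minimal way to run the script, assuming OpenVINO's Python environment is set up and a style-transfer model has already been converted to IR format (the script and model file names below are placeholders):

    python neural_style_webcam.py -m style_transfer.xml -d CPU

Press 'q' in the video window to stop.
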
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IEPlugin


def build_argparser():
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
    parser.add_argument("-l", "--cpu_extension",
                        help="MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
                             "kernel implementations.", type=str, default=None)
    parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
    parser.add_argument("-d", "--device",
                        help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for the device specified (CPU by default)",
                        default="CPU", type=str)
    parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
    parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
    parser.add_argument("--mean_val_r", "-mean_val_r",
                        help="Mean value of red channel for mean value subtraction in postprocessing", default=0,
                        type=float)
    parser.add_argument("--mean_val_g", "-mean_val_g",
                        help="Mean value of green channel for mean value subtraction in postprocessing", default=0,
                        type=float)
    parser.add_argument("--mean_val_b", "-mean_val_b",
                        help="Mean value of blue channel for mean value subtraction in postprocessing", default=0,
                        type=float)
    parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
    return parser


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for the specified device; load the extensions library if given
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read the IR (Intermediate Representation) model files
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    if "CPU" in plugin.device:
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("The following layers are not supported by the plugin for the specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify a CPU extensions library path in the sample's command-line parameters "
                      "using the -l or --cpu_extension argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1  # One frame per inference
    # Allocate the input blob; the network expects NCHW layout
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    log.info("Batch size is {}".format(n))
    # Load the model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net
    # Open the default webcam and request a small capture size to keep inference fast
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    ret, image = cap.read()
    # Start sync inference
    log.info("Starting inference ({} iterations per frame)".format(args.number_iter))
    step = 0
    while ret:
        step += 1
        ret, image = cap.read()
        if not ret:  # Stop when the camera yields no frame
            break
        if image.shape[:-1] != (h, w):
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[0] = image
        infer_time = []
        for i in range(args.number_iter):
            t0 = time()
            res = exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exec_type', 'status',
                                                              'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
                                                                  stats['status'], stats['real_time']))
        # Process the output blob
        log.info("Processing output blob")
        res = res[out_blob]
        # Post-process the stylized output for display
        for batch, data in enumerate(res):
            data = np.swapaxes(data, 0, 2)  # Change data layout from CHW to HWC
            data = np.swapaxes(data, 0, 1)
            data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
            # Clip values to the [0, 255] range
            data[data < 0] = 0
            data[data > 255] = 255
            # Subtract the (optional) per-channel means, then scale to [0, 1] for imshow
            data = data - (args.mean_val_r, args.mean_val_g, args.mean_val_b)
            data /= 255
            cv2.imshow('Video', data)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # Press 'q' to quit
            break
    # Release the camera and inference resources
    cap.release()
    cv2.destroyAllWindows()
    del exec_net
    del plugin


if __name__ == '__main__':
    sys.exit(main() or 0)
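
To produce the .xml/.bin IR files this script expects, a style-transfer network (for example, an ONNX fast-neural-style model) can be converted with OpenVINO's Model Optimizer; this is a sketch, not the author's documented workflow, and the model file name below is a placeholder:

    python mo.py --input_model fast_neural_style.onnx --data_type FP32

If the model was trained with mean-value subtraction, pass matching --mean_val_r/--mean_val_g/--mean_val_b values when running the script.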