# Remember to remove the old model!
# rm *.tflite
import logging
import subprocess
from pathlib import Path
from typing import Tuple

import numpy as np
import tensorflow as tf
from tensorflow.lite.python.interpreter import load_delegate


def create_motion_blur_func_edgetpu(images_shape, dim, angle, edgetpu=True):
    name = "motion_blur_%s_%d_%.2f" % ("_".join(map(str, images_shape)), dim, angle)
    ctor = lambda: create_motion_blur_func(images_shape, dim, angle)
    return create_func_edgetpu(images_shape, ctor, name, edgetpu=edgetpu)


def create_func_edgetpu(images_shape: Tuple[int, ...], ctor: callable, name: str, edgetpu=True):
    """Return a callable that works the same as ctor()'s result but runs on the Edge TPU or in vanilla TF Lite."""
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    fn = "%s%s.tflite" % (name, "_edgetpu" if edgetpu else "")
    if not Path(fn).exists():
        log.info("Creating the regular TensorFlow kernel")
        func = ctor()
        generate_edgetpu_model(log, images_shape, func, name)
    log.info("Loading the %s model", "Edge TPU" if edgetpu else "patched TensorFlow Lite")
    # Attach the Edge TPU delegate only when requested; otherwise run the model on the CPU.
    delegates = [load_delegate("libedgetpu.so.1.0")] if edgetpu else None
    interpreter = tf.lite.Interpreter(model_path=fn, experimental_delegates=delegates)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    tensor_in = interpreter.tensor(input_details[0]["index"])
    tensor_out = interpreter.tensor(output_details[0]["index"])

    def invoke(images):
        # Fill the input buffer in place, run inference, and copy the output
        # before the interpreter reclaims the underlying memory.
        tensor_in()[:] = images
        interpreter.invoke()
        return tensor_out().copy()

    return invoke
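

# A hypothetical second wrapper, included only to illustrate how create_func_edgetpu
# composes with any tf.function; this halving kernel is not part of the original gist.
# The input_signature is needed because get_concrete_function() is called with no arguments.
def create_halve_func_edgetpu(images_shape, edgetpu=True):
    def ctor():
        @tf.function(input_signature=[tf.TensorSpec(images_shape, tf.float32)])
        def halve(images):
            return images * 0.5
        return halve
    name = "halve_%s" % "_".join(map(str, images_shape))
    return create_func_edgetpu(images_shape, ctor, name, edgetpu=edgetpu)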


def generate_edgetpu_model(log: logging.Logger, images_shape: Tuple[int, ...], func: callable, name: str):
    """Convert a tf.function to an Edge TPU model."""
    def gen_input_samples():
        # Representative samples covering the extremes of the uint8 image range,
        # used to calibrate the full-integer quantization.
        yield [np.zeros(images_shape, np.float32)]
        yield [np.ones(images_shape, np.float32) * 255]

    log.info("Generating the quantized TensorFlow Lite model")
    converter = tf.lite.TFLiteConverter.from_concrete_functions([func.get_concrete_function()])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = gen_input_samples
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.uint8
    converter.inference_output_type = tf.uint8
    tflite_model = converter.convert()
    fn = "%s.tflite" % name
    with open(fn, "wb") as fout:
        fout.write(tflite_model)
    log.info("Wrote it to %s", fn)
    log.info("Compiling the Edge TPU model")
    echo_run("edgetpu_compiler", "-s", fn)
    # edgetpu_compiler writes <name>_edgetpu.tflite plus a <name>_edgetpu.log; drop the log.
    Path(fn).with_name(Path(fn).stem + "_edgetpu.log").unlink()


def echo_run(*cmd):
    """Execute an arbitrary command and echo its output."""
    p = subprocess.run(list(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = p.stdout.decode()
    if output:
        print(output)
    p.check_returncode()


# images, create_motion_blur_func, and save_image come from the earlier snippets of this series.
motion_blur = create_motion_blur_func_edgetpu(images.shape, 25, (90 / 180) * np.pi)
save_image(motion_blur(images), "result.jpg")
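
# For comparison, the same wrapper can run the quantized model without the Edge TPU.
# This is a sketch, not part of the original gist; it reuses the <name>.tflite file
# written by generate_edgetpu_model during the call above and the same images array.
motion_blur_cpu = create_motion_blur_func_edgetpu(images.shape, 25, (90 / 180) * np.pi, edgetpu=False)
save_image(motion_blur_cpu(images), "result_cpu.jpg")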