@mats-claassen
Source code used in the blog at blog.xmartlabs.com
# --- Conversion script: rebuild the TFLite MNIST model in Keras, freeze the graph, and convert it to Core ML ---
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow import keras
import coremltools
import tfcoreml as tf_converter
import tensorflow.keras.backend as K
import datetime
import os
from tensorflow.python.framework import graph_io
from mnist import build_model
print("TensorFlow:", tf.__version__)
FROZEN_MODEL_FILE = 'frozen_model.pb'
OUTPUT_FILE = 'MNIST.mlmodel'
# Taken from https://stackoverflow.com/a/52823701/4708657
def freeze_graph(graph, session, output):
    with graph.as_default():
        graphdef_inf = tf.graph_util.remove_training_nodes(graph.as_graph_def())
        graphdef_frozen = tf.graph_util.convert_variables_to_constants(session, graphdef_inf, output)
        graph_io.write_graph(graphdef_frozen, ".", FROZEN_MODEL_FILE, as_text=False)
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="mnist.tflite")
interpreter.allocate_tensors()
# tf.keras.backend.set_learning_phase(0)
input_shape = [28, 28, 1]
model = build_model(input_shape, interpreter)
# FREEZE GRAPH
session = tf.keras.backend.get_session()
INPUT_NODE = model.inputs[0].op.name
OUTPUT_NODE = model.outputs[0].op.name
freeze_graph(session.graph, session, [out.op.name for out in model.outputs])
# CONVERT TO COREML
tf_converter.convert(tf_model_path=FROZEN_MODEL_FILE,
                     mlmodel_path=OUTPUT_FILE,
                     output_feature_names=['Softmax:0'],
                     input_name_shape_dict={'input_1:0': [1, 28, 28, 1]},
                     image_input_names=['input_1:0'],
                     image_scale=1.0/255.0,
                     class_labels=["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
model = coremltools.models.MLModel(OUTPUT_FILE)
spec = model.get_spec()
# RENAME input and output tensors
coremltools.utils.rename_feature(spec, current_name='Softmax__0', new_name='Softmax')
coremltools.utils.rename_feature(spec, current_name='input_1__0', new_name='input')
model = coremltools.models.MLModel(spec)
model.save(OUTPUT_FILE)
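# Optional sanity check (a sketch, not in the original gist; coremltools prediction only
# runs on macOS). The feature names 'input', 'classLabel' and 'Softmax' follow the renaming
# above and the default classifier output name; adjust them if your spec differs.
#
# digit = Image.open('zero.png').convert('L').resize((28, 28))
# prediction = coremltools.models.MLModel(OUTPUT_FILE).predict({'input': digit})
# print(prediction['classLabel'], prediction['Softmax'])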
# --- mnist.py: rebuild the network from the weights stored in the TFLite file (imported above as `from mnist import build_model`) ---
import tensorflow as tf
import numpy as np
from tensorflow.python.keras.layers import Lambda
def dense(params):
    X, W, b = params
    return tf.add(tf.matmul(X, W), b)


def get_variable(interpreter, index, transposed=False):
    var = interpreter.get_tensor(index)
    # TFLite stores fully-connected weights as [units, input_dim];
    # tf.matmul(X, W) expects [input_dim, units], so transpose when needed.
    if transposed:
        var = np.transpose(var, (1, 0))
    return var
def build_model(input_shape, interpreter):
    # These tensor indices are specific to this mnist.tflite file;
    # see the inspection sketch below to find the right indices for another model.
    W1 = get_variable(interpreter, 2, transposed=True)
    b1 = get_variable(interpreter, 3)
    W2 = get_variable(interpreter, 6, transposed=True)
    b2 = get_variable(interpreter, 7)

    inputs = tf.keras.layers.Input(shape=input_shape)
    x_0 = tf.keras.layers.Flatten()(inputs)
    x_1 = Lambda(dense)((x_0, W1, b1))
    x_1relu = tf.nn.relu(x_1)
    x_2 = Lambda(dense)((x_1relu, W2, b2))
    x_softmax = tf.keras.activations.softmax(x_2)
    return tf.keras.models.Model(inputs=inputs, outputs=[x_softmax])
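
# Minimal sketch (not part of the original gist): list every tensor's index, name,
# and shape so the weight/bias indices passed to get_variable() can be picked by
# hand for a different .tflite file. Guarded so it does not run on import.
if __name__ == "__main__":
    inspect_interpreter = tf.lite.Interpreter(model_path="mnist.tflite")
    inspect_interpreter.allocate_tensors()
    for detail in inspect_interpreter.get_tensor_details():
        print(detail['index'], detail['name'], detail['shape'])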
# --- Test script: compare the reimplemented Keras model against the original TFLite model ---
import numpy as np
import tensorflow as tf
from PIL import Image
from mnist import build_model
from tensorflow import keras
print("TensorFlow:", tf.__version__)
def rel_error(x: np.ndarray, y: np.ndarray, eps: float = 1e-8) -> np.float64:
    return np.max(np.abs(x - y) / (np.maximum(eps, np.abs(x) + np.abs(y))))
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="mnist.tflite")
interpreter.allocate_tensors()
input_shape = [28, 28]
model = build_model(input_shape, interpreter)
# get test image
zero_img_path = keras.utils.get_file(
    'zero.png',
    'https://storage.googleapis.com/khanhlvg-public.appspot.com/digit-classifier/zero.png'
)
img = Image.open(zero_img_path).convert('L')
img = img.resize((28, 28))
# Pre-process the image: add a batch dimension and normalize the pixel values to [0, 1]
input_image = np.expand_dims(np.array(img, dtype=np.float32) / 255.0, 0)
model.build(input_image.shape)
a = model.predict(input_image)
scores = np.reshape(a[0], [10])
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], input_image)
# print(output_details)
interpreter.invoke()
tflite_scores = np.asarray(interpreter.get_tensor(output_details[0]['index'])[0])
print("TFlite", tflite_scores)
print("TF reimplemented", scores)
print("Error", rel_error(tflite_scores, scores))
# Code used to debug intermediate outputs:
# return the intermediate result as a second output in `mnist.py` and use the correct node index below
#
# with open("output/4_tensor.txt", "r") as file1:
#     f_list = [float(i) for line in file1 for i in line.split(',') if i.strip()]
#     tf_lite_output = np.reshape(np.asarray(f_list), a[1].shape)
#
# print("TFlite", tf_lite_output)
# print("TF reimplemented", a[1])
# print("Error", rel_error(tf_lite_output, a[1]))