Mirosław Stanek (frogermcs)
load_tf.py
# Load the TensorFlow frozen model (serialized GraphDef)
TF_FROZEN_MODEL = "mobilenet_v2_1.0_224_frozen.pb"
with open(TF_FROZEN_MODEL, 'rb') as f:
    serialized_model = f.read()
tf.reset_default_graph()
graph_definition = tf.GraphDef()
graph_definition.ParseFromString(serialized_model)
with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_definition, name='')
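Not part of the original gists: a minimal sketch of running the imported frozen graph, so the TensorFlow result can be compared against Core ML. It reuses img_laptop_tf and labels from the other snippets on this page and assumes the TF tensor names 'input:0' and 'MobilenetV2/Predictions/Reshape_1:0', inferred from the Core ML feature names 'input__0' and 'MobilenetV2__Predictions__Reshape_1__0' used below.

with tf.Session(graph=g) as sess:
    # Assumed tensor names (see note above)
    input_tensor = g.get_tensor_by_name('input:0')
    output_tensor = g.get_tensor_by_name('MobilenetV2/Predictions/Reshape_1:0')
    # img_laptop_tf comes from preprocess_to_tf.py; labels from load_labels.py
    tf_probs = sess.run(output_tensor, feed_dict={input_tensor: img_laptop_tf})
    top_idx = int(np.argmax(tf_probs))
    print('TF prediction for laptop:', labels[top_idx], tf_probs[0][top_idx])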
preprocess_to_tf.py
# Prepare the image to match TensorFlow's input requirements
# Convert to the expected type: float32
img_laptop_tf = np.array(img_laptop).astype(np.float32)
# Set up the expected input shape: [1, 224, 224, 3]
img_laptop_tf = np.expand_dims(img_laptop_tf, axis=0)
# Scale to the expected value range: [0, 1]
img_laptop_tf = (1.0/255.0) * img_laptop_tf
print('Image shape:', img_laptop_tf.shape)
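Added for completeness (not in the original gist): the golden retriever image can be prepared the same way; img_golden_tf is a name introduced only in this sketch.

# Same preprocessing applied to the golden retriever image (hypothetical variable name)
img_golden_tf = np.array(img_golden).astype(np.float32)
img_golden_tf = np.expand_dims(img_golden_tf, axis=0)    # shape: [1, 224, 224, 3]
img_golden_tf = (1.0/255.0) * img_golden_tf              # values in [0, 1]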
load_labels.py
# Load the ImageNet class labels (one label per line)
LABELS_FILE = 'ImageNetLabels.txt'
with open(LABELS_FILE) as f:
    labels = f.read().splitlines()
coreml_prediction_output
Prediction for Golden Retriever: golden retriever 0.5627079010009766
Prediction for laptop: laptop 0.42153415083885193
coreml_prediction.py
INPUT_TENSOR = "input__0"
OUTPUT_TENSOR = "MobilenetV2__Predictions__Reshape_1__0"
PREDICTED_FEATURE_NAME = "classLabel"
# Prediction is forced to run on the CPU
coreml_output_golden = mlmodel.predict({INPUT_TENSOR: img_golden}, useCPUOnly=True)
# Prediction may run on the GPU (Core ML picks the device when useCPUOnly=False)
coreml_output_laptop = mlmodel.predict({INPUT_TENSOR: img_laptop}, useCPUOnly=False)
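A minimal sketch (not in the original gist) of reading the results, assuming the converted model is a classifier whose OUTPUT_TENSOR output is a label-to-probability dictionary; this is what produces lines like those in coreml_prediction_output above.

# Top label comes back under PREDICTED_FEATURE_NAME; its probability is looked up
# in the dictionary returned under OUTPUT_TENSOR (assumed output structure).
label_golden = coreml_output_golden[PREDICTED_FEATURE_NAME]
prob_golden = coreml_output_golden[OUTPUT_TENSOR][label_golden]
print('Prediction for Golden Retriever:', label_golden, prob_golden)

label_laptop = coreml_output_laptop[PREDICTED_FEATURE_NAME]
prob_laptop = coreml_output_laptop[OUTPUT_TENSOR][label_laptop]
print('Prediction for laptop:', label_laptop, prob_laptop)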
preprocess.py
# Resize both images to the 224x224 input size expected by MobileNet v2
img_golden = img_golden.resize([224,224], PIL.Image.ANTIALIAS)
img_laptop = img_laptop.resize([224,224], PIL.Image.ANTIALIAS)
coreml_model.py
# Load the previously saved Core ML model of MobileNet v2
mlmodel = coremltools.models.MLModel('mobilenet_v2_1.0_224.mlmodel')
# Get the protobuf spec from the model
spec = mlmodel.get_spec()
print(spec.description)
# Output
# >> input {
# >>   name: "input__0"
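A short added sketch, assuming the standard coremltools spec fields: the same spec can also list the model's outputs and the predicted-feature name that predict() returns for a classifier.

# Outputs and the classifier's predicted-feature name (assumed standard spec fields)
for output in spec.description.output:
    print('Output:', output.name)
print('Predicted feature:', spec.description.predictedFeatureName)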
load_images.py
# Download and preview the two test images: a laptop and a golden retriever
img_laptop_url = "https://upload.wikimedia.org/wikipedia/commons/9/90/ThinkPad_X220.jpg"
img_laptop = PIL.Image.open(BytesIO(requests.get(img_laptop_url).content))
imshow(np.asarray(img_laptop))

img_golden_url = "https://upload.wikimedia.org/wikipedia/commons/9/93/Golden_Retriever_Carlos_%2810581910556%29.jpg"
img_golden = PIL.Image.open(BytesIO(requests.get(img_golden_url).content))
imshow(np.asarray(img_golden))
coreml_imports.py
# Inline plots are useful for previewing the images
%matplotlib inline
from matplotlib.pyplot import imshow
import tensorflow as tf
import coremltools
# For easier image processing
import numpy as np
import PIL.Image
import requests
from io import BytesIO
coreml_output
Core ML model generated. Saved at location: mobilenet_v2_1.0_224.mlmodel
Core ML input(s):
[name: "input__0"
 type {
   imageType {
     width: 224
     height: 224
     colorSpace: RGB
   }
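The conversion call itself is not among these snippets. Below is a hedged sketch of a tfcoreml.convert invocation that could produce a model with this interface; the use of tfcoreml and the exact arguments (tensor names, image_scale, class_labels, predicted_feature_name) are assumptions, not the author's recorded command.

import tfcoreml

# Sketch only: arguments are assumed to match the interface shown above
coreml_model = tfcoreml.convert(
    tf_model_path='mobilenet_v2_1.0_224_frozen.pb',
    mlmodel_path='mobilenet_v2_1.0_224.mlmodel',
    output_feature_names=['MobilenetV2/Predictions/Reshape_1:0'],
    input_name_shape_dict={'input:0': [1, 224, 224, 3]},
    image_input_names=['input:0'],       # makes the Core ML input an imageType
    image_scale=1.0/255.0,               # mirrors the [0, 1] scaling used on the TF side
    class_labels='ImageNetLabels.txt',   # exposes classLabel and the probability dictionary
    predicted_feature_name='classLabel')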