Skip to content

Instantly share code, notes, and snippets.

Jameson Toole jamesonthecrow

View GitHub Profile
View one_layer_models.py
# Create a model with a normal convolution: a single Conv2D layer with
# 10 filters and a 10x10 kernel over a 500x500 RGB input.
inpt = keras.layers.Input(shape=(500, 500, 3))
out = keras.layers.Conv2D(10, 10)(inpt)
model = keras.models.Model(inpt, out)
# Convert the Keras model to Core ML and save it to disk.
mlmodel = coremltools.converters.keras.convert(model)
mlmodel.save('convolution.mlmodel')
# Create a model with a dilated (atrous) convolution: the same layer but
# with dilation_rate=4. (Conversion/saving of this model continues beyond
# this excerpt.)
inpt = keras.layers.Input(shape=(500, 500, 3))
out = keras.layers.Conv2D(10, 10, dilation_rate=4)(inpt)
View ANETester.swift
// PUT A BREAKPOINT HERE
// Compile the model. Fail fast with a descriptive message instead of the
// original anonymous force-unwrap (`assetPath!`) / `try!` crashes.
guard let assetURL = assetPath else {
    fatalError("Model asset path is nil — check that the model is bundled with the app.")
}
let compiledModelURL: URL
do {
    compiledModelURL = try MLModel.compileModel(at: assetURL)
} catch {
    fatalError("Failed to compile Core ML model at \(assetURL): \(error)")
}
// Initialize the model for use on a specific set of hardware.
let config = MLModelConfiguration()
config.computeUnits = .all // can be .all, .cpuAndGPU, or .cpuOnly
let mlmodel: MLModel
do {
    mlmodel = try MLModel(contentsOf: compiledModelURL, configuration: config)
} catch {
    fatalError("Failed to load compiled model: \(error)")
}
View quantize_mlmodel.py
from coremltools.models.neural_network import quantization_utils
def quantize_model(mlmodel, nbits, method='linear'):
"""Quantize the weights of an mlmodel to a specific number of bits.
Args:
mlmodel (coremltools.models.MLModel): the Core ML model whose weights
will be quantized.
nbits (int): the bit depth of the quantized weights.
method (string): the quantization method; defaults to 'linear'.
View make_mlmodel_flexible.py
from coremltools.models.neural_network import flexible_shape_utils


def make_mlmodel_flexible(spec, size_range=(100, 1920)):
    """Make input and output sizes of a Core ML model flexible.

    Fixed: the original signature was missing the closing parenthesis of
    the ``size_range`` default value, which is a syntax error.

    Args:
        spec (NeuralNetwork_pb2): a Core ML neural network spec
        size_range ((int, int)): a tuple containing the min and max
            input sizes.
    """
    # Range object describing the allowed image sizes for the model's
    # flexible inputs/outputs.
    size_range_spec = flexible_shape_utils.NeuralNetworkImageSizeRange()
View add_reflective_padding.py
def add_reflective_padding_and_crop(mlmodel, padding_size=20):
"""Add reflective padding and crop layers to remove edge artifacts.
Because the convolution layers rely on 'same' padding, stylized images have
a small ring of distortion around the outer edge. This can be eliminated
with reflective padding on the input image. This method modifies the
original MLModel spec to add a padding layer after the input and a crop
layer before the output to remove the padding at the end.
Args:
View replace_instance_norm_layer.py
# Build a Core ML layer spec from the last Keras layer (the custom
# instance-normalization placeholder).
instance_norm_spec = create_instance_normalization_spec(keras_model.layers[-1])
# Hook the layer up to the global model input and output
instance_norm_spec.input[:] = ["input1"]
instance_norm_spec.output[:] = ["output1"]
# Replace the custom layer placeholder with the new instance norm layer
# NOTE(review): this reaches into MLModel's private `_spec` attribute;
# confirm against the coremltools version in use.
mlmodel._spec.neuralNetwork.layers[-1].CopyFrom(instance_norm_spec)
mlmodel.get_spec()
"""
View create_instance_normalization_spec.py
def create_instance_normalization_spec(layer):
"""Convert an Instance Normalization Keras layer to a Core ML layer spec.
(The original summary mentioned a DeprocessStylizedImage layer, which
contradicted the function name and the Args below.)
Args:
layer (keras.layers.Layer): An Instance Normalization Keras layer.
Returns:
spec (NeuralNetwork_pb2.NeuralNetworkLayer): a core ml layer spec
"""
View instance_normalization.py
# Instance Norm: a single InstanceNormalization layer (from keras_contrib)
# applied over a 500x500 RGB input, normalizing along the last axis.
inpt = keras.layers.Input(shape=(500, 500, 3))
out = keras_contrib.layers.InstanceNormalization(axis=-1)(inpt)
keras_model = keras.models.Model(inpt, out)
# Convert with add_custom_layers=True so the unsupported layer becomes a
# custom-layer placeholder in the generated spec; no conversion functions
# are supplied here, so the placeholder is filled in later.
mlmodel = coremltools.converters.keras.convert(
keras_model,
add_custom_layers=True,
custom_conversion_functions={}
)
View GetParts.java
// Get the first pose
Pose pose = poseResult.getPoses().get(0);
// Get the body keypoints
Keypoints[] keypoints = pose.getKeypoints();
// Get the name of the keypoint
String partName = keypoints[0].getPartName();
PointF keypointPoisition = keypoints[0].getPosition()
View CameraActivity.java
public class CameraActivity extends Activity implements ImageReader.OnImageAvailableListener {
// The rest of your activity
@Override
public void onImageAvailable(final ImageReader reader) {
// The FritzVisionImage class makes it easy to manipulate images used as model inputs.
// Grab the most recent frame from the camera's ImageReader.
Image image = reader.acquireLatestImage();
final FritzVisionImage fritzVisionImage = FritzVisionImage.fromMediaImage(image, imageRotation);
// Close the Image promptly so the ImageReader can reuse its buffer.
image.close();
You can’t perform that action at this time.