Ozgur Sahin (ozgurshn)
ozgurshn / PlaygroundModel.swift
Created June 2, 2020 07:58
Playground Core ML model loading
/// URL of the model, assuming it was installed in the same bundle as this class
// class var urlOfModelInThisBundle: URL {
//     let bundle = Bundle(for: ObjectDetectionAccurate.self)
//     return bundle.url(forResource: "ObjectDetectionAccurate", withExtension: "mlmodelc")!
// }
class var urlOfModelInThisBundle: URL {
    // A playground has no precompiled .mlmodelc in its bundle, so compile the
    // raw .mlmodel from the playground's Resources folder at runtime instead.
    return try! MLModel.compileModel(at: URL(fileURLWithPath: "/Users/ozgur/Library/Autosave Information/ThermDetector.playground/Resources/ObjectDetectionAccurate.mlmodel"))
}
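For context, a minimal usage sketch: MLModel.compileModel(at:) returns the URL of the freshly compiled .mlmodelc, which can then be loaded directly. The class name ObjectDetectionAccurate follows the gist; the rest is illustrative.

import CoreML

// Load the compiled model from the URL produced above.
let model = try! MLModel(contentsOf: ObjectDetectionAccurate.urlOfModelInThisBundle)
print(model.modelDescription)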
ozgurshn / RectangleObservationProjection.swift
Created May 28, 2020 13:41
Vision normalized rectangle detection projection in Swift
func observationToRect(box: VNRectangleObservation) -> CGRect {
    // Vision's normalized coordinates have their origin at the bottom-left,
    // so the y coordinate is flipped into UIKit's top-left origin.
    let xCord = box.topLeft.x * imageView.frame.size.width
    let yCord = (1 - box.topLeft.y) * imageView.frame.size.height
    let width = (box.topRight.x - box.bottomLeft.x) * imageView.frame.size.width
    let height = (box.topLeft.y - box.bottomLeft.y) * imageView.frame.size.height
    return CGRect(x: xCord, y: yCord, width: width, height: height)
}
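A sketch of how such a helper is typically driven from a Vision completion handler. The request setup is an assumption, not part of the gist, and it presumes this runs inside the view controller that owns imageView:

let request = VNDetectRectanglesRequest { [weak self] request, _ in
    guard let self = self,
          let observations = request.results as? [VNRectangleObservation] else { return }
    DispatchQueue.main.async {
        // Project each normalized observation into the image view's coordinates.
        let rects = observations.map { self.observationToRect(box: $0) }
        // Draw `rects` over the image view here.
    }
}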
ozgurshn / perspectiveForRectangle.swift
Created May 26, 2020 10:37
Apply a perspective transform to the Vision rectangle observation
guard let rectangle = request?.results?.first as? VNRectangleObservation else {
    guard let error = error else { return }
    print("Error: Rectangle detection failed - Vision request returned an error. \(error.localizedDescription)")
    return
}
guard let filter = CIFilter(name: "CIPerspectiveCorrection") else {
    print("Error: Rectangle detection failed - Could not create perspective correction filter.")
    return
}
let width = CGFloat(CVPixelBufferGetWidth(currentCameraImage))
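The gist preview truncates here. A plausible continuation following the standard CIPerspectiveCorrection recipe; the scaling helper is an assumption, while the filter's input keys are the documented ones:

let height = CGFloat(CVPixelBufferGetHeight(currentCameraImage))

// Scale a normalized Vision point to pixel coordinates for Core Image.
func scaled(_ point: CGPoint) -> CIVector {
    CIVector(x: point.x * width, y: point.y * height)
}

filter.setValue(scaled(rectangle.topLeft), forKey: "inputTopLeft")
filter.setValue(scaled(rectangle.topRight), forKey: "inputTopRight")
filter.setValue(scaled(rectangle.bottomLeft), forKey: "inputBottomLeft")
filter.setValue(scaled(rectangle.bottomRight), forKey: "inputBottomRight")
filter.setValue(CIImage(cvPixelBuffer: currentCameraImage), forKey: kCIInputImageKey)

// The de-skewed rectangle, ready for display or further processing.
let correctedImage = filter.outputImage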
ozgurshn / Terms of use
Last active April 25, 2020 12:11
Scan and Colorize Your Photos terms of use
Scan and Colorize Your Photos terms of use
Please find the legal information and the terms and conditions of the Scan and Colorize Your Photos service below.
IMPORTANT: BY USING THE SERVICES (AS DEFINED BELOW) YOU INDICATE YOUR ACCEPTANCE OF THE FOLLOWING TERMS AND CONDITIONS. IF YOU DO NOT ACCEPT THE TERMS AND CONDITIONS SET FORTH HEREIN, YOU MUST NOT USE THE SERVICES. PLEASE READ THESE TERMS OF SERVICE CAREFULLY AS THEY CONTAIN IMPORTANT INFORMATION ABOUT YOUR LEGAL RIGHTS, REMEDIES AND OBLIGATIONS.
Welcome to Scan and Colorize Your Photos, where you can scan your old photo albums!
Acceptance of Terms
The following instrument consists of the terms and conditions governing your ("you") access to and use of the application known as "Scan and Colorize Your Photos" and the content, features, software, related technology and services therein (collectively, the "Services"). These Scan and Colorize Your Photos Terms of Use together with the Privacy Policy (collectively, the "Terms") constitute a binding agreement.
ozgurshn / predictionOnCoreML.py
Last active September 4, 2020 08:38
Prediction on Core ML model with PIL image input
##https://github.com/apple/coremltools/blob/master/examples/APIExamples.md
import coremltools
import numpy as np
import PIL.Image
# load a model whose input type is "Image"
model = coremltools.models.MLModel('path/to/the/saved/model.mlmodel')
Height = 20 # use the correct input image height
Width = 60 # use the correct input image width
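The preview cuts off before the prediction call; the linked coremltools example continues roughly as below. The input name "image" is a hypothetical placeholder and must match the model's actual input name:

# Build a PIL image of the model's expected input size and run prediction.
# Core ML models with an "Image" input type accept PIL images directly.
img = PIL.Image.new('RGB', (Width, Height))

# "image" is a hypothetical input name; inspect model.get_spec() for the real one.
out_dict = model.predict({'image': img})
print(out_dict)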
ozgurshn / quantize_model.py
Created April 10, 2020 13:57
Quantize the weights of CoreML model
##https://heartbeat.fritz.ai/advanced-tips-for-core-ml-77c9e0231a9
from coremltools.models.neural_network import quantization_utils
def quantize_model(mlmodel, nbits, method='linear'):
    """Quantize the weights of an mlmodel to a specific number of bits.

    Args:
        mlmodel (coremltools.models.MLModel): A Core ML model.
        nbits (int): the bit depth of the quantized weights.
        method (string): the quantization method.
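    """
    # The gist preview truncates here; a plausible completion, assuming
    # coremltools' quantization_utils.quantize_weights does the actual work:
    return quantization_utils.quantize_weights(mlmodel, nbits,
                                               quantization_mode=method)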
ozgurshn / make_mlmodel_flexible.py
Created April 10, 2020 13:56
Make Core ML model input and output sizes flexible
##https://heartbeat.fritz.ai/advanced-tips-for-core-ml-77c9e0231a9
from coremltools.models.neural_network import flexible_shape_utils
def make_mlmodel_flexible(spec, size_range=(100, 1920)):
    """Make input and output sizes of a Core ML model flexible.

    Args:
        spec (NeuralNetwork_pb2): a Core ML neural network spec
        size_range ((int, int)): a tuple containing the min and max input sizes.
    """
    size_range_spec = flexible_shape_utils.NeuralNetworkImageSizeRange()
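    # The preview truncates here; a plausible completion using coremltools'
    # flexible_shape_utils, applying the same range to every image feature:
    size_range_spec.add_height_range(size_range)
    size_range_spec.add_width_range(size_range)
    for feature in spec.description.input:
        flexible_shape_utils.update_image_size_range(spec, feature.name, size_range_spec)
    for feature in spec.description.output:
        flexible_shape_utils.update_image_size_range(spec, feature.name, size_range_spec)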
ozgurshn / add_reflective_padding_and_crop.py
Last active April 10, 2020 13:55
add_reflective_padding_and_crop: add padding and crop layers to a Core ML model manually
##https://heartbeat.fritz.ai/advanced-tips-for-core-ml-77c9e0231a9
def add_reflective_padding_and_crop(mlmodel, padding_size=20):
    """Add reflective padding and crop layers to remove edge artifacts.

    Because the convolution layers rely on 'same' padding, stylized images have
    a small ring of distortion around the outer edge. This can be eliminated
    with reflective padding on the input image. This method modifies the
    original MLModel spec to add a padding layer after the input and a crop
    layer before the output to remove the padding at the end.

    Args:
        mlmodel (coremltools.models.MLModel): an MLModel spec.
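        padding_size (int): the number of pixels to pad (assumed; matches the
            default argument above).
    """
    # Implementation truncated in the gist preview; the full spec-editing code
    # that inserts the padding and crop layers is in the linked article.

# A hedged usage sketch, assuming the completed function from the article;
# the model path is a hypothetical placeholder:
model = coremltools.models.MLModel('path/to/style_transfer.mlmodel')
padded_model = add_reflective_padding_and_crop(model, padding_size=20)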
ozgurshn / perspective.swift
Created March 16, 2020 11:48
Apply a perspective transform to a rectangle detected by Vision
guard let results = request.results as? [VNRectangleObservation] else {
    fatalError("Unexpected result type from VNDetectRectanglesRequest")
}
// Check the results count only if objectsCountToDetect isn't the default
if objectsCountToDetect != defaultObjectsCount {
    guard results.count == objectsCountToDetect else {
        Logger.e("Wrong rectangle results count: found \(results.count) | required \(objectsCountToDetect)")
        return
    }
}
ozgurshn / imageviewtansition.swift
Created February 26, 2020 01:25
UIImageView transition animation
//https://stackoverflow.com/questions/7638831/fade-dissolve-when-changing-uiimageviews-image/38350024#38350024
let toImage = UIImage(named: "myname.png")
UIView.transition(with: self.imageView,
                  duration: 0.3,
                  options: .transitionCrossDissolve,
                  animations: {
                      self.imageView.image = toImage
                  },
                  completion: nil)