Skip to content

Instantly share code, notes, and snippets.

@abrampers
Created March 20, 2019 08:49
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save abrampers/c69e35cf3e09fdc8aeba74a1ba2e812f to your computer and use it in GitHub Desktop.
//: A UIKit based Playground for presenting user interface
import UIKit
import PlaygroundSupport
import CoreML
/// Adapts a single `CVPixelBuffer` to the `MLFeatureProvider` protocol,
/// exposing it under the model's input feature name `"img_placeholder__0"`.
class StyleTransferInput: MLFeatureProvider {

    /// Image handed to the style-transfer network.
    var input: CVPixelBuffer

    /// The single feature this provider can vend.
    var featureNames: Set<String> {
        return ["img_placeholder__0"]
    }

    /// Returns the wrapped pixel buffer for the expected feature name,
    /// or `nil` for any other name.
    func featureValue(for featureName: String) -> MLFeatureValue? {
        guard featureName == "img_placeholder__0" else { return nil }
        return MLFeatureValue(pixelBuffer: input)
    }

    init(input: CVPixelBuffer) {
        self.input = input
    }
}
/// Draws `cgImage` into a newly allocated 32BGRA pixel buffer of the given
/// dimensions (the image is scaled to fill the whole buffer).
/// Crashes if the buffer cannot be allocated — acceptable in a playground.
private func pixelBuffer(cgImage: CGImage, width: Int, height: Int) -> CVPixelBuffer {
    var created: CVPixelBuffer? = nil
    let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                     kCVPixelFormatType_32BGRA, nil, &created)
    guard status == kCVReturnSuccess, let buffer = created else {
        fatalError("Cannot create pixel buffer for image")
    }
    // Lock while CoreGraphics writes directly into the buffer's memory.
    CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
    let baseAddress = CVPixelBufferGetBaseAddress(buffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    // Little-endian BGRA with the alpha byte ignored, matching 32BGRA.
    let bitmapInfo = CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.noneSkipFirst.rawValue
    let context = CGContext(data: baseAddress,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                            space: colorSpace,
                            bitmapInfo: bitmapInfo)
    context?.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
    return buffer
}
/// Scales `image` so it completely covers `targetSize` (aspect-fill),
/// rendering at scale factor 1.0. One dimension may overshoot the target;
/// callers crop afterwards if an exact size is required.
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
    let original = image.size
    // Aspect-fill: scale by the larger ratio so both dimensions reach
    // at least the target (equivalent to the original if/else branches).
    let ratio = max(targetSize.width / original.width,
                    targetSize.height / original.height)
    let scaledSize = CGSize(width: original.width * ratio,
                            height: original.height * ratio)
    let drawRect = CGRect(origin: .zero, size: scaledSize)
    UIGraphicsBeginImageContextWithOptions(scaledSize, false, 1.0)
    image.draw(in: drawRect)
    let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return scaledImage!
}
/// Crops a `width` × `height` rectangle out of the centre of `image`,
/// preserving the source image's scale and orientation.
func centerCrop(image: UIImage, width: Double, height: Double) -> UIImage {
    let cgimage = image.cgImage!
    // Size as UIKit reports it for this backing CGImage.
    let sourceSize = UIImage(cgImage: cgimage).size
    let cropSize = CGSize(width: CGFloat(width), height: CGFloat(height))
    // Position the crop rect so its centre coincides with the source centre.
    let origin = CGPoint(x: sourceSize.width / 2 - cropSize.width / 2,
                         y: sourceSize.height / 2 - cropSize.height / 2)
    let cropRect = CGRect(origin: origin, size: cropSize)
    // cropping(to:) shares pixel storage with the source; crash on failure
    // (out-of-bounds rect) is acceptable in a playground.
    let croppedRef = cgimage.cropping(to: cropRect)!
    return UIImage(cgImage: croppedRef, scale: image.scale, orientation: image.imageOrientation)
}
/// Runs the style-transfer `model` on `cgImage` and returns the stylized
/// result as a `CGImage`. Uses `try!` and force-unwraps throughout: any
/// prediction failure or missing output feature crashes — acceptable for
/// a playground demo, not for production code.
func stylizeImage(cgImage: CGImage, model: MLModel) -> CGImage {
// Input size fed to the model. NOTE(review): width 883 here does not match
// the 884 used by the resize/crop pipeline at the bottom of this file, so
// the image is squeezed by one pixel when drawn into the buffer — confirm
// which width the converted model actually expects.
// size can change here if you want, remember to run right sizes in the fst evaluating script
let input = StyleTransferInput(input: pixelBuffer(cgImage: cgImage, width: 883, height: 720))
// model.prediction will run the style model on input image
let options = MLPredictionOptions()
// Force CPU execution (avoids GPU/Metal issues when running in a playground).
options.usesCPUOnly = true
let outFeatures = try! model.prediction(from: input, options: options)
// Pull the output image buffer; the feature name "add_37__0" comes from the
// converted model's graph and must match it exactly.
let output = outFeatures.featureValue(for: "add_37__0")!.imageBufferValue!
// Lock the buffer read-only while CoreGraphics reads its base address.
CVPixelBufferLockBaseAddress(output, .readOnly)
let width = CVPixelBufferGetWidth(output)
let height = CVPixelBufferGetHeight(output)
let data = CVPixelBufferGetBaseAddress(output)!
// Wrap the buffer's pixels in a BGRA bitmap context so they can be
// snapshotted into an independent CGImage via makeImage().
let outContext = CGContext(data: data,
width: width,
height: height,
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(output),
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGImageByteOrderInfo.order32Little.rawValue | CGImageAlphaInfo.noneSkipFirst.rawValue)!
let outImage = outContext.makeImage()!
// Safe to unlock only after makeImage() has copied the pixel data.
CVPixelBufferUnlockBaseAddress(output, .readOnly)
return outImage
}
// Demo pipeline: load the bundled photo, aspect-fill it to cover 884×720,
// centre-crop to exactly 884×720, then run it through the style model.
// NOTE(review): stylizeImage builds its model input at width 883, one pixel
// narrower than the 884 used here — confirm the intended model input size.
let resizedImage = resizeImage(image: UIImage(named: "bali-pagoda.jpg")!, targetSize: CGSize(width: 884, height: 720))
let croppedImg = centerCrop(image: resizedImage, width: 884, height: 720)
// `udnie` is the Xcode-generated wrapper class for the compiled .mlmodel.
let result = stylizeImage(cgImage: croppedImg.cgImage!, model: udnie().model)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment