Last active
July 24, 2017 23:46
-
-
Save mackoj/309950aba9dd37df53d9b7d8a575e6a3 to your computer and use it in GitHub Desktop.
Simple test to pixellate faces using CoreImage
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//
//  ViewController.swift
//  localreporter
//
//  Created by Jeffrey Macko on 15/07/2017.
//  Copyright © 2017 Jeffrey Macko. All rights reserved.
//
import UIKit | |
class ViewController: UIViewController {

  // MARK: - Outlets
  @IBOutlet weak var imageSegmentControlSelector: UISegmentedControl!
  @IBOutlet weak var outputImageView: UIImageView!
  @IBOutlet weak var inputImageView: UIImageView!
  @IBOutlet weak var debugTimeLabel: UILabel!
  @IBOutlet weak var nbFacesLabel: UILabel!

  // Index of the last selected detector segment
  // (0 = none, 1 = CIDetector/iOS 10 path, 2 = Vision/iOS 11 path).
  var lastIdx = 0

  override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    updateImage()
  }

  @IBAction func updatePhoto(_ sender: UISegmentedControl) {
    resetDebugLabels()
    updateImage()
  }

  /// Returns the bundled sample photo matching the photo selector's current index.
  func getImage() -> UIImage {
    switch self.imageSegmentControlSelector.selectedSegmentIndex {
    case 0:
      return #imageLiteral(resourceName: "IMG_0287_pix")
    case 1:
      return #imageLiteral(resourceName: "IMG_0299_pix")
    case 2:
      return #imageLiteral(resourceName: "20170724_182716_pix")
    default:
      return #imageLiteral(resourceName: "peta-4_pix")
    }
  }

  /// Shows the current sample photo and, depending on `lastIdx`, kicks off
  /// one of the two asynchronous face-pixellation paths.
  fileprivate func updateImage() {
    let image = getImage()
    guard let cgImage = image.cgImage else { return }

    // Shared completion: display the result and report timing + face count.
    let completionBlock: (CGImage, Int, DispatchTime) -> () = { modifiedImage, nbFaces, start in
      self.outputImageView.image = UIImage(cgImage: modifiedImage)
      let end = DispatchTime.now()
      let elapsedNanos = end.uptimeNanoseconds - start.uptimeNanoseconds
      let timeInterval = Double(elapsedNanos) / 1_000_000_000
      self.debugTimeLabel.text = "\(timeInterval)"
      self.nbFacesLabel.text = "\(nbFaces)"
      debugPrint("End Completion: \(timeInterval)")
    }

    self.inputImageView.image = UIImage(cgImage: cgImage)

    switch lastIdx {
    case 1:
      FaceRemover.removeFacesAsynciOS10(inputImage: cgImage, orientation: image.imageOrientation, completion: completionBlock)
    case 2:
      FaceRemover.removeFacesAsynciOS11(inputImage: cgImage, orientation: image.imageOrientation, completion: completionBlock)
    default:
      break
    }
  }

  // NOTE: the method name is misspelled but is wired to the storyboard, so it must stay.
  @IBAction func segmentedControlValueDIdCHanged(_ segmentedControl: UISegmentedControl) {
    lastIdx = segmentedControl.selectedSegmentIndex
    resetDebugLabels()
    if segmentedControl.selectedSegmentIndex == 0 {
      freeImages()
    } else {
      updateImage()
    }
  }

  /// Resets both debug labels back to "0".
  fileprivate func resetDebugLabels() {
    self.debugTimeLabel.text = "\(0)"
    self.nbFacesLabel.text = "\(0)"
  }

  /// Drops both displayed images (used on memory pressure and on "none").
  fileprivate func freeImages() {
    self.inputImageView.image = nil
    self.outputImageView.image = nil
  }

  override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    freeImages()
  }
}
// MARK: - From here on is the interesting part
import CoreImage | |
import ImageIO | |
import Vision | |
struct FaceRemover {

  /// Shared Core Image rendering context (expensive to create, so reused).
  static let context = CIContext()

  /// Returns a pixellated copy of the face region of `inputImage`, or nil on failure.
  /// - Parameters:
  ///   - inputImage: the full source image.
  ///   - bounds: the detected face rectangle, in image coordinates.
  private static func applyPixellateOnTheBoundsOfTheFeature(inputImage : CIImage, bounds : CGRect) -> CIImage? {
    debugPrint("👾", terminator: "")
    guard
      let filter = CIFilter(name: "CIPixellate")
      else {
        debugPrint("Echec Pixellate")
        return nil }
    // Extract the face; the crop keeps the original image-space extent.
    let partialImage = inputImage.cropped(to: bounds)
    // Pixel size: we want the face covered by roughly a 5x5 grid of giant
    // pixels, i.e. each cell is ~20% of the face's side length.
    let area = bounds.size.height * bounds.size.width
    let pixelSize = ((sqrt(area) * 20) / 100)
    debugPrint("pixelSize: \(pixelSize)")
    // CIPixellate's inputCenter is expressed in image coordinates, so anchor
    // the grid at the middle of the face rect. (Bug fix: the previous code
    // swapped width/height and ignored the rect's origin, misaligning the grid.)
    let center = CIVector(x: bounds.midX, y: bounds.midY)
    debugPrint("center: \(center)")
    // Configure the filter. inputScale expects a number; passing the previous
    // string "\(pixelSize)" relied on fragile KVC coercion.
    filter.setDefaults()
    filter.setValue(pixelSize as NSNumber, forKey: kCIInputScaleKey)
    filter.setValue(center, forKey: kCIInputCenterKey)
    filter.setValue(partialImage, forKey: kCIInputImageKey)
    guard let image = filter.value(forKey: kCIOutputImageKey) as? CIImage else {
      debugPrint("Echec applying filter")
      return nil
    }
    return image
  }

  /// Composites every pixellated face patch over the original image.
  private static func mergeFilteredImagesWithInputImage(inputImage : CIImage, filteredImageToApply : [CIImage]) -> CIImage {
    debugPrint(" Merging", separator: "", terminator: "")
    // Stack the patches one on top of the other to get the final rendering.
    let finalImage = filteredImageToApply.reduce(inputImage) { (r, filter) in
      filter.composited(over: r)
    }
    return finalImage
  }

  /// Renders a CIImage into a CGImage through the shared context; nil on failure.
  private static func convertCIImageTOCGImage(input : CIImage) -> CGImage? {
    debugPrint(" Convertion en CGImage")
    let image = FaceRemover.context.createCGImage(input, from: input.extent)
    if image == nil {
      debugPrint("Echec createCGImage")
    }
    return image
  }

  /// Pixellates every face in a photo using CIDetector (iOS 10 API).
  /// The more faces, the longer it takes. Always calls `completion` on the
  /// main queue with either the processed image or the untouched input.
  static func removeFacesAsynciOS10(inputImage cgInputImage : CGImage, orientation : UIImageOrientation, onQueue : DispatchQueue = DispatchQueue.main, completion : @escaping (CGImage, Int, DispatchTime)->()) {
    let start = DispatchTime.now()
    onQueue.async {
      let inputImage = CIImage(cgImage: cgInputImage)
      var finalImage : CGImage? = nil
      var nbFaces = 0
      // Build the face detector.
      let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: FaceRemover.context, options: [CIDetectorAccuracy : CIDetectorAccuracyHigh])
      // The image orientation must be realigned for detection to work
      // correctly — getting this right took longer than the rest of the code.
      let option : [String : Any]? = [ CIDetectorImageOrientation : orientation.cgImagePropertyOrientation().rawValue]
      // Pixellate each detected face, if any.
      if let faces = faceDetector?.features(in: inputImage, options: option) {
        nbFaces = faces.count
        debugPrint("faces.count: \(nbFaces)")
        if faces.count > 0 {
          let images = faces.flatMap({ FaceRemover.applyPixellateOnTheBoundsOfTheFeature(inputImage: inputImage, bounds: $0.bounds) })
          let outputImage = FaceRemover.mergeFilteredImagesWithInputImage(inputImage: inputImage, filteredImageToApply: images)
          finalImage = FaceRemover.convertCIImageTOCGImage(input: outputImage)
        }
      }
      // Always hand back an image: the generated one, or the untouched input.
      let res = finalImage ?? cgInputImage
      let end = DispatchTime.now()
      let nanoTime = end.uptimeNanoseconds - start.uptimeNanoseconds
      let timeInterval = Double(nanoTime) / 1_000_000_000
      debugPrint("End removeFacesAsync: \(timeInterval)")
      let startNext = DispatchTime.now()
      DispatchQueue.main.async {
        completion(res, nbFaces, startNext)
      }
    }
  }

  /// Pixellates every face in a photo using the Vision framework (iOS 11 API).
  /// The more faces, the longer it takes. Always calls `completion` on the
  /// main queue with either the processed image or the untouched input.
  static func removeFacesAsynciOS11(inputImage cgInputImage : CGImage, orientation : UIImageOrientation, onQueue : DispatchQueue = DispatchQueue.main, completion : @escaping (CGImage, Int, DispatchTime)->()) {
    let start = DispatchTime.now()
    onQueue.async {
      let inputImage = CIImage(cgImage: cgInputImage)
      var finalImage : CGImage? = nil
      var nbFaces = 0
      let imageRequestHandler = VNImageRequestHandler(cgImage: cgInputImage, orientation: orientation.cgImagePropertyOrientation(), options: [:])
      let faceRectanglesRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
        if (error != nil) {
          debugPrint(error!)
        } else {
          nbFaces = request.results?.count ?? 0
          debugPrint("faces.count: \(nbFaces)")
          if let faces = request.results as? [VNFaceObservation], faces.count > 0 {
            let images = faces.flatMap({ (aFace) -> CIImage? in
              // Vision reports a normalized (0...1) bounding box; scale it
              // back into pixel coordinates of the source image.
              let rectX = aFace.boundingBox.origin.x * CGFloat(cgInputImage.width)
              let rectY = aFace.boundingBox.origin.y * CGFloat(cgInputImage.height)
              let rectWidth = aFace.boundingBox.size.width * CGFloat(cgInputImage.width)
              let rectHeight = aFace.boundingBox.size.height * CGFloat(cgInputImage.height)
              let rect = CGRect(x: rectX, y:rectY, width: rectWidth, height: rectHeight)
              return FaceRemover.applyPixellateOnTheBoundsOfTheFeature(inputImage: inputImage, bounds: rect)
            })
            let outputImage = FaceRemover.mergeFilteredImagesWithInputImage(inputImage: inputImage, filteredImageToApply: images)
            finalImage = FaceRemover.convertCIImageTOCGImage(input: outputImage)
          }
        }
        // Always hand back an image: the generated one, or the untouched input.
        let res = finalImage ?? cgInputImage
        let end = DispatchTime.now()
        let nanoTime = end.uptimeNanoseconds - start.uptimeNanoseconds
        let timeInterval = Double(nanoTime) / 1_000_000_000
        debugPrint("End removeFacesAsync: \(timeInterval)")
        let startNext = DispatchTime.now()
        DispatchQueue.main.async {
          completion(res, nbFaces, startNext)
        }
      })
      do {
        try imageRequestHandler.perform([faceRectanglesRequest])
      } catch {
        debugPrint(error)
      }
    }
  }
}
extension UIImageOrientation {
  /// Maps a UIKit image orientation onto its ImageIO counterpart,
  /// covering every case exhaustively. It works — don't touch it. :D
  func cgImagePropertyOrientation() -> CGImagePropertyOrientation {
    switch self {
    case .up:            return .up
    case .down:          return .down
    case .left:          return .left
    case .right:         return .right
    case .upMirrored:    return .upMirrored
    case .downMirrored:  return .downMirrored
    case .leftMirrored:  return .leftMirrored
    case .rightMirrored: return .rightMirrored
    }
  }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment