Skip to content

Instantly share code, notes, and snippets.

@noppefoxwolf
Last active August 13, 2019 12:29
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save noppefoxwolf/96fc42a2b3c3e518f629ad2b5e8018c7 to your computer and use it in GitHub Desktop.
Save noppefoxwolf/96fc42a2b3c3e518f629ad2b5e8018c7 to your computer and use it in GitHub Desktop.
An easy-to-use wrapper around CIDetector.
//
// FaceDetector.swift
//
// Created by Tomoya Hirano on 2016/05/21.
// Copyright © 2016 Tomoya Hirano. All rights reserved.
//
import UIKit
/// A single detected face, with all coordinates already converted from
/// Core Image's bottom-left origin to UIKit's top-left origin by `FaceDetector`.
///
/// Setters are `private(set)`, which in Swift 2 is file scope, so
/// `FaceDetector` (same file) can populate the optional features after `init`.
struct Face {
    /// Bounding box of the face in image coordinates.
    /// Non-optional: `init(faceRect:)` always assigns it, so the previous
    /// implicitly-unwrapped `CGRect!` was unnecessary.
    private(set) var rect: CGRect
    /// Mouth position, if the detector reported one.
    private(set) var mouth: CGPoint? = nil
    /// Left-eye position, if the detector reported one.
    private(set) var leftEye: CGPoint? = nil
    private(set) var leftEyeClosed = false
    /// Right-eye position, if the detector reported one.
    private(set) var rightEye: CGPoint? = nil
    private(set) var rightEyeClosed = false
    private(set) var hasSmile = false
    /// Face rotation reported by `CIFaceFeature.faceAngle`, if available.
    private(set) var faceAngle: Float? = nil
    /// True when the detector assigned a tracking ID (see `hasTrackingID`).
    private(set) var knownPerson = false

    var hasMouth: Bool { return mouth != nil }
    var hasLeftEye: Bool { return leftEye != nil }
    var hasRightEye: Bool { return rightEye != nil }

    /// Creates a face with only its bounding box; feature points are
    /// filled in afterwards by `FaceDetector`.
    init(faceRect: CGRect) {
        rect = faceRect
    }
}
/// Thin wrapper around `CIDetector` that runs face detection on a `UIImage`
/// and returns the results in UIKit (top-left origin) coordinates.
final class FaceDetector {
    // High-accuracy face detector; smile detection is enabled so that
    // `Face.hasSmile` gets populated.
    private let ciDetector = CIDetector(ofType: CIDetectorTypeFace,
                                        context: nil,
                                        options: [CIDetectorAccuracy: CIDetectorAccuracyHigh,
                                                  CIDetectorSmile: true])

    /// Runs face detection and repackages each `CIFaceFeature` as a `Face`.
    ///
    /// - Parameter image: Source image. It must be backed by a `CGImage`
    ///   (a `UIImage` created from a `CIImage` is not); otherwise an empty
    ///   array is returned instead of crashing on a force-unwrap.
    /// - Returns: One `Face` per detected face, with every rect and point
    ///   flipped from Core Image's bottom-left origin into UIKit coordinates.
    func detect(image: UIImage) -> [Face] {
        guard let cgImage = image.CGImage else { return [] }
        let ciImage = CIImage(CGImage: cgImage)
        let imageSize = image.size
        // flatMap with a conditional cast drops any non-face features
        // without the filter + forced-cast dance.
        return ciDetector.featuresInImage(ciImage).flatMap { feature -> Face? in
            guard let feature = feature as? CIFaceFeature else { return nil }
            var face = Face(faceRect: convertRect(imageSize, rect: feature.bounds))
            // Each feature point is only valid when its `has…` flag is set.
            if feature.hasMouthPosition {
                face.mouth = convertPoint(imageSize, point: feature.mouthPosition)
            }
            if feature.hasFaceAngle {
                face.faceAngle = feature.faceAngle
            }
            if feature.hasLeftEyePosition {
                face.leftEye = convertPoint(imageSize, point: feature.leftEyePosition)
            }
            if feature.hasRightEyePosition {
                face.rightEye = convertPoint(imageSize, point: feature.rightEyePosition)
            }
            face.leftEyeClosed = feature.leftEyeClosed
            face.rightEyeClosed = feature.rightEyeClosed
            face.knownPerson = feature.hasTrackingID
            face.hasSmile = feature.hasSmile
            return face
        }
    }

    /// Debug helper: draws each face's bounding box plus 10pt markers for the
    /// mouth and eye positions onto `sourceImage` and returns the composite.
    class func displayDetectedPosition(sourceImage: UIImage, faces: [Face]) -> UIImage {
        // Builds a 10pt square centered on a feature point.
        let makeRect = { (point: CGPoint) -> CGRect in
            let side: CGFloat = 10.0
            return CGRect(x: point.x - side / 2.0,
                          y: point.y - side / 2.0,
                          width: side,
                          height: side)
        }
        var rects = [CGRect]()
        faces.forEach {
            rects.append($0.rect)
            if let p = $0.mouth { rects.append(makeRect(p)) }
            if let p = $0.leftEye { rects.append(makeRect(p)) }
            if let p = $0.rightEye { rects.append(makeRect(p)) }
        }
        return testOverlay(sourceImage, rects: rects)
    }

    /// Strokes each rect in red (2pt line width) on top of `before` and
    /// returns the rendered image at scale 1.0.
    private class func testOverlay(before: UIImage, rects: [CGRect]) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(before.size, false, 1.0)
        // Always balance the context push, whatever path returns.
        defer { UIGraphicsEndImageContext() }
        let context = UIGraphicsGetCurrentContext()
        before.drawInRect(CGRect(origin: CGPointZero, size: before.size))
        CGContextSetStrokeColorWithColor(context, UIColor.redColor().CGColor)
        CGContextSetLineWidth(context, 2.0)
        for rect in rects {
            CGContextStrokeRect(context, rect)
        }
        return UIGraphicsGetImageFromCurrentImageContext()
    }

    /// Converts a CIDetector rect (bottom-left origin) into UIKit's
    /// top-left-origin coordinates for an image of `size`.
    private func convertRect(size: CGSize, rect: CGRect) -> CGRect {
        var rect = rect
        rect.origin.y = size.height - rect.origin.y - rect.height
        return rect
    }

    /// Converts a CIDetector point (bottom-left origin) into UIKit's
    /// top-left-origin coordinates for an image of `size`.
    private func convertPoint(size: CGSize, point: CGPoint) -> CGPoint {
        var point = point
        point.y = size.height - point.y
        return point
    }
}
@liuweicode
Copy link

pretty cool

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment