Slow Camera
export const App = () => <DepthCameraComp />;
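// Objective-C bridge: registers the Swift CameraBridge view manager and its
// promise-based capture method with React Native.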
#import "React/RCTViewManager.h"
#import "React/RCTBridge.h"
#import "React/RCTEventDispatcher.h"
#import "React/RCTBridgeModule.h"
@interface RCT_EXTERN_MODULE(CameraBridge, RCTViewManager)
RCT_EXTERN_METHOD(capture: (RCTPromiseResolveBlock)resolve
rejecter: (RCTPromiseRejectBlock)reject)
@end
//
// CameraBridge.swift
// anysticker
//
// Created by Jonny Burger on 07.03.20.
// Copyright © 2020 Facebook. All rights reserved.
//
import Foundation
import Photos
import Vision
import AVFoundation
import MobileCoreServices
public extension FileManager {
func temporaryFileURL(fileName: String = UUID().uuidString) -> URL? {
return URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true).appendingPathComponent(fileName)
}
}
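// Scale factor applied to the landmark coordinate space before points are sent to JS (1.0 = full resolution).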
let downSampling = Float(1.0)
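// Lanczos-based resize helper, used to shrink the segmentation mattes before they are written to disk.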
extension CIImage {
func resizeCI(size: CGSize) -> CIImage {
let scale = Double(size.width) / Double(self.extent.width)
let filter = CIFilter(name: "CILanczosScaleTransform")!
filter.setValue(self, forKey: kCIInputImageKey)
filter.setValue(NSNumber(value: scale), forKey: kCIInputScaleKey)
filter.setValue(1.0, forKey: kCIInputAspectRatioKey)
return filter.outputImage!
}
}
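// Draws the image into a new context mirrored on both axes (a 180° rotation); used to fix the matte orientation.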
func flipImage(_ image: CGImage) -> CGImage? {
UIGraphicsBeginImageContextWithOptions(CGSize(width: image.width, height: image.height), false, 1)
let bitmap = UIGraphicsGetCurrentContext()!
let resizedWidth = Int(image.width / 2)
let resizedHeight = Int(image.height / 2)
bitmap.translateBy(x: CGFloat(resizedWidth), y: CGFloat(resizedHeight))
bitmap.scaleBy(x: -1.0, y: -1.0)
bitmap.translateBy(x: CGFloat(-resizedWidth), y: -CGFloat(resizedHeight))
bitmap.draw(image, in: CGRect(x: 0, y: 0, width: resizedWidth * 2, height: resizedHeight * 2))
return bitmap.makeImage()
}
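// Redraws a CGImage rotated by -90° so the landscape sensor output becomes portrait.
// The orientation parameter is currently ignored; the rotation is hard-coded.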
func createMatchingBackingDataWithImage(imageRef: CGImage?, orientation: UIImage.Orientation) -> CGImage? {
var orientedImage: CGImage?
if let imageRef = imageRef {
let originalWidth = imageRef.width
let originalHeight = imageRef.height
let bitsPerComponent = imageRef.bitsPerComponent
let bytesPerRow = imageRef.bytesPerRow
let colorSpace = imageRef.colorSpace
let bitmapInfo = imageRef.bitmapInfo
var degreesToRotate: Double
degreesToRotate = -90.0
let radians = degreesToRotate * Double.pi / 180
var width: Int
var height: Int
width = originalHeight
height = originalWidth
if let contextRef = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace!, bitmapInfo: bitmapInfo.rawValue) {
contextRef.translateBy(x: 0, y: CGFloat(height) / 2)
contextRef.rotate(by: CGFloat(radians))
contextRef.scaleBy(x: 1.0, y: -1.0)
contextRef.translateBy(x: -CGFloat(height)/2, y: -CGFloat(width))
contextRef.draw(imageRef, in: CGRect(x: 0, y: 0, width: originalWidth, height: originalHeight))
orientedImage = contextRef.makeImage()
}
}
return orientedImage
}
// The RCTViewManager subclass that React Native instantiates for the native camera view.
@objc(CameraBridge)
class CameraBridge: RCTViewManager, AVCapturePhotoCaptureDelegate {
var previewView: UIView!
var captureSession: AVCaptureSession!
var stillImageOutput: AVCapturePhotoOutput!
var videoPreviewLayer: AVCaptureVideoPreviewLayer!
lazy var context = CIContext()
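// Extracts the requested semantic segmentation matte (hair or skin) from the photo,
// applies the photo's EXIF orientation, and returns it as a CIImage in sRGB.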
func handleMatteData(_ photo: AVCapturePhoto, ssmType: AVSemanticSegmentationMatte.MatteType) -> CIImage {
// Find the semantic segmentation matte image for the specified type.
guard var segmentationMatte = photo.semanticSegmentationMatte(for: ssmType) else { return CIImage() }
// Retrieve the photo orientation and apply it to the matte image.
if let orientation = photo.metadata[String(kCGImagePropertyOrientation)] as? UInt32,
let exifOrientation = CGImagePropertyOrientation(rawValue: orientation) {
// Apply the Exif orientation to the matte image.
segmentationMatte = segmentationMatte.applyingExifOrientation(exifOrientation)
}
var imageOption: CIImageOption!
// Switch on the AVSemanticSegmentationMatteType value.
switch ssmType {
case .hair:
imageOption = .auxiliarySemanticSegmentationHairMatte
case .skin:
imageOption = .auxiliarySemanticSegmentationSkinMatte
default:
print("This semantic segmentation type is not supported!")
return CIImage();
}
guard let perceptualColorSpace = CGColorSpace(name: CGColorSpace.sRGB) else { return CIImage()}
// Create a new CIImage from the matte's underlying CVPixelBuffer.
let ciImage = CIImage( cvImageBuffer: segmentationMatte.mattingImage,
options: [imageOption: true,
.colorSpace: perceptualColorSpace])
return ciImage;
}
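// Called just before capture; remembers the resolved photo dimensions for landmark scaling.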
func photoOutput(_ output: AVCapturePhotoOutput, willCapturePhotoFor resolvedSettings: AVCaptureResolvedPhotoSettings) {
print("settings \(NSDate().timeIntervalSince1970)")
self.width = Int(resolvedSettings.photoDimensions.width)
self.height = Int(resolvedSettings.photoDimensions.height)
}
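// Main capture pipeline: rotate the captured JPEG to portrait and write it to a temp file,
// notify JS through DepthEvents, run face-landmark detection, then resize, flip and write
// each enabled segmentation matte (hair/skin) and emit its file path as well.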
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
print("output \(NSDate().timeIntervalSince1970)")
output.isDepthDataDeliveryEnabled = true
var semanticSegmentationMatteDataArray = [Data]()
var i = 0
guard let photoFileName = FileManager.default.temporaryFileURL(fileName: "photo.jpg") else {
return;
}
let cImage = photo.cgImageRepresentation()?.takeUnretainedValue()
let destination = CGImageDestinationCreateWithURL(photoFileName as CFURL, kUTTypeJPEG, 1, nil)!
let newCgImage = createMatchingBackingDataWithImage(imageRef: cImage, orientation: .left)
CGImageDestinationAddImage(destination, newCgImage!, nil)
CGImageDestinationFinalize(destination)
print("sending photo \(NSDate().timeIntervalSince1970)")
DepthEvents.shared?.sendPhoto(name: "\(photoFileName.absoluteString),\(self.width),\(self.height)")
recognizeFacialLandmarks(photo: photo)
for index in output.enabledSemanticSegmentationMatteTypes.indices {
var img = handleMatteData(photo, ssmType: output.enabledSemanticSegmentationMatteTypes[index])
let dim = CGSize.init(width: 720, height: 1080)
img = img.resizeCI(size: dim)
guard let perceptualColorSpace = CGColorSpace(name: CGColorSpace.sRGB) else { return}
guard let imageData = (context.jpegRepresentation(of: img,
colorSpace: perceptualColorSpace,
options: [.depthImage: img])) else { return }
let cgImageMatte = context.createCGImage(img, from: img.extent)
let matte = flipImage(cgImageMatte!)
guard let fileName = FileManager.default.temporaryFileURL(fileName: "matte-\(i).jpg") else {
continue;
}
let matteDestination = CGImageDestinationCreateWithURL(fileName as CFURL, kUTTypeJPEG, 1, nil)!
CGImageDestinationAddImage(matteDestination, matte! , nil)
CGImageDestinationFinalize(matteDestination)
DepthEvents.shared?.sendMatte(name: fileName.absoluteString, type: output.enabledSemanticSegmentationMatteTypes[index].rawValue)
i += 1
semanticSegmentationMatteDataArray.append(imageData)
}
}
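// Runs a VNDetectFaceLandmarksRequest against the captured photo on a background queue.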
func recognizeFacialLandmarks(photo: AVCapturePhoto) {
var requests: [VNRequest] = []
requests.append(VNDetectFaceLandmarksRequest(completionHandler: self.handleDetectedFaceLandmarks))
guard let cgImage = photo.cgImageRepresentation() else {
return
}
let imageRequestHandler = VNImageRequestHandler(cgImage: cgImage.takeUnretainedValue(),
orientation: .up,
options: [:])
DispatchQueue.global(qos: .userInitiated).async {
do {
try imageRequestHandler.perform(requests)
} catch let error as NSError {
print("Failed to perform image request: \(error)")
return
}
}
}
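// Resolved photo dimensions, recorded in photoOutput(_:willCapturePhotoFor:).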
private var width: Int = 0
private var height: Int = 0
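// Serialises the detected face-contour points into a "width:w,height:h|y,x" string for JS.
// x and y are swapped (and width/height exchanged) because the image was rotated to portrait.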
fileprivate func handleDetectedFaceLandmarks(request: VNRequest?, error: Error?) {
if error != nil {
return
}
// Perform drawing on the main thread.
DispatchQueue.main.async {
guard let results = request?.results as? [VNFaceObservation] else {
return
}
if results.count == 0 {
DepthEvents.shared!.sendLandmarks(args: "")
}
for faceObservation in results {
guard let landmarks = faceObservation.landmarks else {
continue
}
guard let faceContour = landmarks.faceContour else {
continue
}
let hmm = faceContour.pointsInImage(imageSize: CGSize(width: Int(Float(self.width) / downSampling), height: Int(Float(self.height) / downSampling)))
// Intentional: Image was rotated
var points = "width:\(Float(self.height)/downSampling),height:\(Float(self.width)/downSampling)|"
for i in hmm {
points = "\(points)\n\(i.y),\(i.x)"
}
print("sending landmarks \(NSDate().timeIntervalSince1970)")
DepthEvents.shared!.sendLandmarks(args: points)
}
}
}
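// Exposed to JS via the Objective-C bridge above: freezes the preview and requests a
// depth + semantic-matte photo capture. Note that resolve/reject are never called here;
// results are delivered asynchronously through DepthEvents instead.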
@objc func capture(_ resolve: RCTPromiseResolveBlock,
rejecter reject: RCTPromiseRejectBlock) -> Void {
print("capture \(NSDate().timeIntervalSince1970)")
videoPreviewLayer.connection?.isEnabled = false
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
settings.isDepthDataDeliveryEnabled = true
settings.photoQualityPrioritization = .balanced
settings.enabledSemanticSegmentationMatteTypes = [.hair, .skin]
stillImageOutput.capturePhoto(with: settings, delegate: self)
}
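// Creates the preview view React Native will mount: configures the front TrueDepth camera,
// enables depth and semantic-matte delivery on the photo output, then starts the session
// off the main queue and attaches the preview layer back on the main queue.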
override func view() -> UIView! {
print("make view \(NSDate().timeIntervalSince1970)")
self.previewView = UIView()
guard let frontCamera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .front) else {
print("Unable to access back camera!")
return UIView()
}
guard let input = try? AVCaptureDeviceInput(device: frontCamera) else {
print("Unable to capture device")
return UIView()
}
self.captureSession = AVCaptureSession()
self.captureSession.sessionPreset = .hd1280x720
self.stillImageOutput = AVCapturePhotoOutput()
self.stillImageOutput.isHighResolutionCaptureEnabled = false
// Note: the output has no video connection until it is added to the session,
// so this orientation call is effectively a no-op; orientation is handled on
// the preview layer connection and by rotating the captured image instead.
self.stillImageOutput.connection(with: .video)?.videoOrientation = .portrait
if self.captureSession.canAddInput(input) && self.captureSession.canAddOutput(self.stillImageOutput) {
self.captureSession.beginConfiguration()
self.captureSession.addInput(input)
self.captureSession.addOutput(self.stillImageOutput)
}
self.stillImageOutput.isDepthDataDeliveryEnabled = true
self.stillImageOutput.enabledSemanticSegmentationMatteTypes = self.stillImageOutput.availableSemanticSegmentationMatteTypes
DispatchQueue.global().async {
self.captureSession.commitConfiguration()
self.captureSession.startRunning()
DispatchQueue.main.async {
self.videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
let newActions = [
"onOrderIn": NSNull(),
"onOrderOut": NSNull(),
"sublayers": NSNull(),
"contents": NSNull(),
"bounds": NSNull(),
]
self.videoPreviewLayer.actions = newActions
self.videoPreviewLayer.videoGravity = .resizeAspectFill
self.videoPreviewLayer.connection?.videoOrientation = .portrait
self.previewView.layer.addSublayer(self.videoPreviewLayer)
self.videoPreviewLayer.layoutIfNeeded()
self.videoPreviewLayer.frame = self.previewView.bounds
}
}
print("finished view \(NSDate().timeIntervalSince1970)")
return previewView
}
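// Allow React Native to initialise this module off the main queue; the view itself is still created on the main thread.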
override static func requiresMainQueueSetup() -> Bool {
return false
}
}
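// JS side: requireNativeComponent looks up the CameraBridge view manager registered above.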
import {requireNativeComponent} from 'react-native';
export const DepthCameraComp = requireNativeComponent('CameraBridge');
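Not part of the original gist: a minimal sketch of how the JS side might mount the camera and trigger a capture. It assumes the capture:rejecter: method exported in the Objective-C bridge is reachable as NativeModules.CameraBridge.capture(), and that DepthCameraComp lives at a hypothetical './DepthCameraComp' path. The DepthEvents module that delivers the photo, mattes and landmarks back to JS is not included in this gist, and the Swift capture method never settles its promise, so the call below is fire-and-forget.
// Hypothetical usage sketch (assumptions noted above; not part of the original gist).
import React from 'react';
import {Button, NativeModules, View} from 'react-native';
import {DepthCameraComp} from './DepthCameraComp'; // hypothetical module path for the component above

export const CaptureScreen = () => (
  <View style={{flex: 1}}>
    <DepthCameraComp style={{flex: 1}} />
    <Button
      title="Capture"
      onPress={() => {
        // Fire-and-forget: the native capture method never resolves its promise;
        // photo, matte and landmark results arrive via the (separate) DepthEvents module.
        NativeModules.CameraBridge.capture();
      }}
    />
  </View>
);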