Skip to content

Instantly share code, notes, and snippets.

View hietalajulius's full-sized avatar

Julius Hietala hietalajulius

View GitHub Profile
/// Camera-driven object-detection screen: owns the AVCapture pipeline and the
/// CALayers used to draw detections over the live preview.
/// Conforms to AVCaptureVideoDataOutputSampleBufferDelegate to receive frames.
/// NOTE(review): class body is truncated in this excerpt — closing brace and
/// several referenced members (e.g. `requests`, `inferenceTimeLayer`) are not visible.
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
// Pixel dimensions of the capture buffer; presumably set once the session is configured — not visible here.
var bufferSize: CGSize = .zero
// Duration of the most recent Vision request, measured in captureOutput below.
var inferenceTime: CFTimeInterval = 0;
// The capture session driving both the preview layer and the frame delegate.
private let session = AVCaptureSession()
// Container view from the storyboard that hosts the preview/detection layers.
@IBOutlet weak var previewView: UIView!
// Backing layer of previewView; assigned in setupLayers.
var rootLayer: CALayer! = nil
// Live camera preview, added as a sublayer of rootLayer.
private var previewLayer: AVCaptureVideoPreviewLayer! = nil
// Overlay layer for detection bounding boxes — assignment not visible in this excerpt.
private var detectionLayer: CALayer! = nil
/// Builds the full pipeline on first load: capture input, video output,
/// display layers, and the Vision/CoreML request — then starts the session.
/// Order matters: layers depend on the session, and startRunning comes last.
override func viewDidLoad() {
super.viewDidLoad()
setupCapture()
setupOutput()
setupLayers()
// Model loading failures are silently ignored here; detection simply won't run.
// NOTE(review): consider surfacing this error rather than discarding it with try?.
try? setupVision()
// NOTE(review): startRunning blocks; Apple recommends calling it off the main thread.
session.startRunning()
}
/// Creates the capture input from the back wide-angle camera.
/// NOTE(review): function is truncated in this excerpt — the code that adds
/// `deviceInput` to the session (and the closing brace) is not visible.
func setupCapture() {
var deviceInput: AVCaptureDeviceInput!
// First matching back-facing wide-angle camera; nil on devices without one (e.g. Simulator).
let videoDevice = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back).devices.first
do {
// Force-unwrap crashes if no camera was found — TODO confirm this is acceptable for target devices.
deviceInput = try AVCaptureDeviceInput(device: videoDevice!)
} catch {
print("Could not create video device input: \(error)")
return
}
/// Installs the camera preview layer and the on-screen inference-time readout.
/// NOTE(review): truncated — `inferenceTimeBounds`, `inferenceTimeLayer`, and
/// `createRectLayer` are declared outside this excerpt; the closing brace is missing.
func setupLayers() {
previewLayer = AVCaptureVideoPreviewLayer(session: session)
// Fill the whole view, cropping the camera image as needed.
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
rootLayer = previewView.layer
previewLayer.frame = rootLayer.bounds
rootLayer.addSublayer(previewLayer)
// 150x17pt badge centered horizontally, pinned near the bottom edge of the preview.
inferenceTimeBounds = CGRect(x: rootLayer.frame.midX-75, y: rootLayer.frame.maxY-70, width: 150, height: 17)
// [1,1,1,1] is presumably white RGBA — depends on createRectLayer's convention; verify.
inferenceTimeLayer = createRectLayer(inferenceTimeBounds, [1,1,1,1])
/// Loads the compiled YOLOv5n CoreML model from the app bundle and wraps it in a
/// Vision request whose results are handled on the main queue.
/// - Throws: an NSError (domain "ViewController", code -1) when the model file is missing.
/// NOTE(review): truncated — the results-handling body, the request registration,
/// and the catch clause for this do-block are outside this excerpt.
func setupVision() throws {
guard let modelURL = Bundle.main.url(forResource: "yolov5n", withExtension: "mlmodelc") else {
throw NSError(domain: "ViewController", code: -1, userInfo: [NSLocalizedDescriptionKey: "Model file is missing"])
}
do {
let visionModel = try VNCoreMLModel(for: MLModel(contentsOf: modelURL))
let objectRecognition = VNCoreMLRequest(model: visionModel, completionHandler: { (request, error) in
// Hop to the main queue: results drive UI (layer updates).
// NOTE(review): closure captures self strongly — confirm no retain cycle once the
// full registration code is visible.
DispatchQueue.main.async(execute: {
if let results = request.results {
/// Per-frame delegate callback: runs the Vision requests on each camera frame
/// and records how long inference took.
/// NOTE(review): truncated — the catch clause for this do-block and the method's
/// closing brace are not visible; `self.requests` is declared outside this excerpt.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Drop frames that carry no image data.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
// .right compensates for the sensor's landscape orientation in a portrait UI — TODO confirm for all supported orientations.
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .right, options: [:])
do {
// Wall-clock timing of the synchronous Vision pass; displayed via inferenceTimeLayer.
let start = CACurrentMediaTime()
try imageRequestHandler.perform(self.requests)
inferenceTime = (CACurrentMediaTime() - start)
use actix::{Actor, Context, Handler};
use actix_broker::{BrokerIssue, BrokerSubscribe};
use actix_rt::System;
use std::println;
/// Actix message carrying a single integer payload, broadcast via actix-broker.
/// `rtype(result = "()")` — handlers return nothing.
#[derive(actix::Message, Clone, Debug)]
#[rtype(result = "()")]
struct Message1 {
// Payload; semantics of "level" are not established in this excerpt — presumably a propagation depth or priority.
pub level: i32,
}
#[derive(actix::Message, Clone, Debug)]
// NOTE(review): statement fragment from an unseen enclosing function (sections
// elided with `// ...`); NN, MSE, and `target` are defined outside this excerpt.
// Initialize the random number generator with a fixed seed for reproducible runs
let mut rng = StdRng::seed_from_u64(123);
// ...
// Set the batch size for training
let batch_size = 16;
// ...
// Generate random input samples within the range [-3.0, 3.0]
// Shape is (batch_size, 2): 16 rows of 2 features each.
let x: Array2<f64> = Array2::from_shape_fn((batch_size, 2), |_| rng.gen_range(-3.0..=3.0));
// Compute the target values for the input samples using the PDF function
// (computation of `target` is elided from this excerpt)
// Create a new neural network with 2 input features, 8 hidden units in each of the two hidden layers, and 1 output feature
let mut nn = NN::new(2, 8, 8, 1, &mut rng);
// ...
// Perform a forward pass through the neural network to compute the predicted output
let y = nn.forward(&x);
// Create a mean squared error (MSE) loss function
let mut loss = MSE::new();
// ...
// Compute the loss value using the MSE loss function
let loss_value = loss.forward(&y, &target);