public class CameraActivity extends Activity implements ImageReader.OnImageAvailableListener {

    // The rest of your activity

    @Override
    public void onImageAvailable(final ImageReader reader) {
        // The FritzVisionImage class makes it easy to manipulate images used as model inputs.
        Image image = reader.acquireLatestImage();
        final FritzVisionImage fritzVisionImage = FritzVisionImage.fromMediaImage(image, imageRotation);
        image.close();
    }
}
import ai.fritz.vision.FritzVision;
import ai.fritz.poseestimationmodel.PoseEstimationOnDeviceModel;
import ai.fritz.vision.poseestimation.FritzVisionPosePredictor;
import ai.fritz.vision.poseestimation.FritzVisionPoseResult;
import ai.fritz.core.FritzOnDeviceModel;

// ...

public class CameraActivity extends Activity implements ImageReader.OnImageAvailableListener {

    private FritzVisionPosePredictor posePredictor;
    private FritzVisionPoseResult poseResult;

    // ...
}
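The fragments above never show the predictor being created or run. The sketch below is a hedged guess that pose estimation follows the same getPredictor()/predict() pattern the object detection snippets further down this page use; the exact FritzVision.PoseEstimation names are assumptions.

// Hedged sketch (assumed API names), e.g. in onCreate():
FritzOnDeviceModel onDeviceModel = new PoseEstimationOnDeviceModel();
posePredictor = FritzVision.PoseEstimation.getPredictor(onDeviceModel);

// Later, inside onImageAvailable(), the FritzVisionImage built from the camera
// frame can be run through the predictor:
poseResult = posePredictor.predict(fritzVisionImage);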
guard let pose = poseResult.decodePose() else { return }
let leftArmParts: [PosePart] = [.leftWrist, .leftElbow, .leftShoulder]
let rightArmParts: [PosePart] = [.rightWrist, .rightElbow, .rightShoulder]
var foundLeftArm: [Keypoint] = []
var foundRightArm: [Keypoint] = []
// Sort detected keypoints into the left- and right-arm groups.
for keypoint in pose.keypoints {
    if leftArmParts.contains(keypoint.part) {
        foundLeftArm.append(keypoint)
    } else if rightArmParts.contains(keypoint.part) {
        foundRightArm.append(keypoint)
    }
}
extension PoseEstimationViewController: AVCaptureVideoDataOutputSampleBufferDelegate {

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // FritzVisionImage objects offer convenient ways to manipulate
        // images used as input to machine learning models.
        // You can resize, crop, and scale images to your needs.
        let image = FritzVisionImage(sampleBuffer: sampleBuffer, connection: connection)

        // Set options for our pose estimation model using the constants
        // we initialized earlier in the ViewController.
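        // (The original snippet is cut off here. What follows is a hedged sketch of the
        //  missing prediction step; the predict(_:completion:) signature on the pose
        //  model is an assumption, not confirmed by this page.)
        poseModel.predict(image) { poseResult, error in
            guard let poseResult = poseResult, error == nil else { return }
            // poseResult can then be decoded into keypoints, as in the
            // decodePose() snippet above, and drawn over the camera preview.
        }
    }
}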
import Fritz
import UIKit

class PoseEstimationViewController: UIViewController {

    private let poseModel = FritzVisionPoseModel()

    // We can also set a few sensitivity parameters for our model.
    // The poseThreshold is a number between 0 and 1. Higher numbers mean
    // the model must be more confident about its estimate, thus reducing false
    // positives.
    internal var poseThreshold: Double = 0.3

    // ...
}
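How poseThreshold is actually applied isn't shown on this page. One hedged possibility, given a decoded pose as in the guard-let snippet above and assuming each Keypoint exposes a confidence score, is to drop low-confidence keypoints before drawing:

// Hedged sketch: keypoint.score is an assumed property name.
let confidentKeypoints = pose.keypoints.filter { $0.score >= poseThreshold }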
class MainActivity : AppCompatActivity() {

    private lateinit var renderScript: RenderScript
    private lateinit var yuvToRGB: ScriptIntrinsicYuvToRGB
    private var yuvDataLength: Int = 0
    private lateinit var allocationIn: Allocation
    private lateinit var allocationOut: Allocation
    private lateinit var bitmapOut: Bitmap

    // Map that will contain a key-value combination of detected items.
    private val itemMap by lazy {
        hashMapOf<String, Int>()
    }

    // ...
}
class MainActivity : AppCompatActivity() {

    // Map that will contain a key-value combination of detected items.
    private val itemMap by lazy {
        hashMapOf<String, Int>()
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        val onDeviceModel = ObjectDetectionOnDeviceModel()
        val objectPredictor = FritzVision.ObjectDetection.getPredictor(onDeviceModel)
        var fritzVisionImage: FritzVisionImage
        // ...
    }
}
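None of these fragments show itemMap being filled. The sketch below is one hedged possibility: run the predictor on a frame that has been converted to a FritzVisionImage and tally each detected label. The objects and visionLabel.text accessors on the result are assumptions, not confirmed by this page.

// Hedged sketch inside the frame processor, after the YUV frame has been
// converted to bitmapOut. Accessor names on the result object are assumptions.
fritzVisionImage = FritzVisionImage.fromBitmap(bitmapOut)
val objectResult = objectPredictor.predict(fritzVisionImage)
for (detected in objectResult.objects) {
    val label = detected.visionLabel.text      // assumed accessor
    itemMap[label] = (itemMap[label] ?: 0) + 1
}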
class MainActivity : AppCompatActivity() {

    override fun onCreate(savedInstanceState: Bundle?) {
        val onDeviceModel = ObjectDetectionOnDeviceModel()
        val objectPredictor = FritzVision.ObjectDetection.getPredictor(onDeviceModel)
        var fritzVisionImage: FritzVisionImage

        cameraView.addFrameProcessor { frame ->
            if (yuvDataLength == 0) {
                // Run this only once.
                initializeData()
            }
            // ...
        }
    }
}
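initializeData() is never defined in these fragments. Below is a minimal sketch of what it might do with the RenderScript fields declared at the top of MainActivity, assuming NV21/YUV 4:2:0 camera frames; frameWidth and frameHeight are hypothetical placeholders for the preview size.

// Hedged sketch: one-time setup of the RenderScript YUV-to-RGB pipeline.
// frameWidth/frameHeight are hypothetical placeholders for the camera preview size.
private fun initializeData() {
    yuvDataLength = frameWidth * frameHeight * 3 / 2   // YUV 4:2:0 uses 1.5 bytes per pixel
    renderScript = RenderScript.create(this)
    yuvToRGB = ScriptIntrinsicYuvToRGB.create(renderScript, Element.U8_4(renderScript))
    allocationIn = Allocation.createSized(renderScript, Element.U8(renderScript), yuvDataLength)
    bitmapOut = Bitmap.createBitmap(frameWidth, frameHeight, Bitmap.Config.ARGB_8888)
    allocationOut = Allocation.createFromBitmap(renderScript, bitmapOut)
}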
class ViewController: UIViewController {

    var cameraView: UIImageView!
    var maskView: UIImageView!

    override func viewDidLoad() {
        // ...
        cameraView = UIImageView(frame: view.bounds)
        cameraView.contentMode = .scaleAspectFill
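        // Hedged continuation (the excerpt cuts off here): presumably maskView is set up
        // the same way and both views are added to the view hierarchy.
        maskView = UIImageView(frame: view.bounds)
        maskView.contentMode = .scaleAspectFill
        view.addSubview(cameraView)
        view.addSubview(maskView)
    }
}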