#!/usr/bin/python3
# Originally by Alasdair Allan (see below); modified by Ben Everard, 2023
# Copyright (c) 2022 Raspberry Pi Ltd
# Author: Alasdair Allan <alasdair@raspberrypi.com>
# SPDX-License-Identifier: BSD-3-Clause
# A TensorFlow Lite example for Picamera2 on Raspberry Pi OS Bullseye
#
# Install the necessary dependencies before starting,
#
# $ sudo apt update
# $ sudo apt install build-essential
# $ sudo apt install libatlas-base-dev
# $ sudo apt install python3-pip
# $ pip3 install tflite-runtime
# $ pip3 install opencv-python==4.4.0.46
# $ pip3 install pillow
# $ pip3 install numpy
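#
# This modified version also drives motors through an Adafruit Motor HAT, so
# you will additionally need the MotorKit library (assumed package name):
#
# $ pip3 install adafruit-circuitpython-motorkit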
#
# and run from the command line,
#
# $ python3 real_time_with_labels.py --model mobilenet_v2.tflite --label coco_labels.txt
import argparse
import numpy as np
import tflite_runtime.interpreter as tflite
from picamera2 import MappedArray, Picamera2, Preview
from adafruit_motorkit import MotorKit
import time
seeking = "person"        # the COCO label the robot hunts for
normalSize = (640, 480)   # main preview resolution
lowresSize = (320, 240)   # low-res stream fed to the detector

rectangles = []           # kept from the original example; unused here
def ReadLabelFile(file_path):
    # Parse a label file into a {class_id: class_name} dictionary.
    with open(file_path, 'r') as f:
        lines = f.readlines()
    ret = {}
    for line in lines:
        pair = line.strip().split(maxsplit=1)
        ret[int(pair[0])] = pair[1].strip()
    return ret
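
# For reference, ReadLabelFile expects each line to pair a numeric id with a
# name, e.g. (illustrative sample of a COCO-style labels file):
#   0  person
#   1  bicycle
#   2  car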
def InferenceTensorFlow(image, model, output, camera, label=None):
    # Run one detection pass on a greyscale frame; return True if the object
    # we are seeking is spotted. 'output' and 'rectangles' are kept from the
    # original example but are unused here.
    global rectangles

    if label:
        labels = ReadLabelFile(label)
    else:
        labels = None

    interpreter = tflite.Interpreter(model_path=model, num_threads=4)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    floating_model = False
    if input_details[0]['dtype'] == np.float32:
        floating_model = True

    # Convert the single-channel frame to RGB and resize to the model's input.
    rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    initial_h, initial_w, channels = rgb.shape

    picture = cv2.resize(rgb, (width, height))

    input_data = np.expand_dims(picture, axis=0)
    if floating_model:
        input_data = (np.float32(input_data) - 127.5) / 127.5

    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # SSD-style output tensors: boxes, classes, scores, number of detections.
    detected_boxes = interpreter.get_tensor(output_details[0]['index'])
    detected_classes = interpreter.get_tensor(output_details[1]['index'])
    detected_scores = interpreter.get_tensor(output_details[2]['index'])
    num_boxes = interpreter.get_tensor(output_details[3]['index'])

    for i in range(int(num_boxes)):
        top, left, bottom, right = detected_boxes[0][i]
        classId = int(detected_classes[0][i])
        score = detected_scores[0][i]
        if score > 0.5:
            # .get() guards against ids missing from the label file, and the
            # 'labels and' check avoids a crash when no --label was given.
            if labels and labels.get(classId) == seeking:
                print("found on camera ", camera)
                return True
    return False
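
# A minimal sketch of a single call, assuming the model and label files from
# the header comments and a greyscale numpy frame (the 0 is just a camera id
# used for logging):
#   found = InferenceTensorFlow(grey_frame, "mobilenet_v2.tflite", "out.jpg",
#                               0, "coco_labels.txt")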
# initialise motors
kit = MotorKit()
max_speed = 0.3  # overridden further down, after the cameras are set up
motors_forward = [1, 1, -1, -1]  # kept from an earlier revision; unused below
motors = [kit.motor1, kit.motor2, kit.motor3, kit.motor4]  # unused below
# process arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Path of the detection model.', required=True)
parser.add_argument('--label', help='Path of the labels file.')
parser.add_argument('--output', help='File path of the output image.')
args = parser.parse_args()

if args.output:
    output_file = args.output
else:
    output_file = 'out.jpg'

if args.label:
    label_file = args.label
else:
    label_file = None
# initialise cameras
picam0 = Picamera2(0)
picam0.start_preview(Preview.QTGL)
config = picam0.create_preview_configuration(main={"size": normalSize},
                                             lores={"size": lowresSize, "format": "YUV420"})
picam0.configure(config)
stride = picam0.stream_configuration("lores")["stride"]
picam0.start()

picam1 = Picamera2(1)
picam1.start_preview(Preview.QTGL)
config = picam1.create_preview_configuration(main={"size": normalSize},
                                             lores={"size": lowresSize, "format": "YUV420"})
picam1.configure(config)
# Both cameras use an identical lores configuration, so reusing one 'stride'
# value for both is safe here.
stride = picam1.stream_configuration("lores")["stride"]
picam1.start()
# cv2 is imported late because of a clash with Picamera2's preview
import cv2

max_speed = 0.5   # note: this overrides the 0.3 set earlier
left_direction = 1
right_direction = -1
def set_motors(left, right):
    # Drive each side of the robot only if its camera has seen the target.
    left_speed = 0
    right_speed = 0
    if left:
        left_speed = max_speed * left_direction
    if right:
        right_speed = max_speed * right_direction
    kit.motor1.throttle = left_speed
    kit.motor2.throttle = left_speed
    kit.motor3.throttle = right_speed
    kit.motor4.throttle = right_speed
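
# The resulting behaviour is tank-style steering (assuming the right-hand
# motors are mounted mirrored, so that -1 drives that side forwards):
#   left camera only  -> only the left side runs, pivoting towards the target
#   right camera only -> only the right side runs, pivoting the other way
#   both cameras      -> both sides run and the robot drives straight at it
#   neither camera    -> all throttles are zero and the robot stops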
while True:
    # The lores stream is YUV420: the first (stride * height) bytes are the
    # Y plane, which serves as a ready-made greyscale image for inference.
    buffer = picam0.capture_buffer("lores")
    grey = buffer[:stride * lowresSize[1]].reshape((lowresSize[1], stride))
    found_left = InferenceTensorFlow(grey, args.model, output_file, 0, label_file)

    buffer = picam1.capture_buffer("lores")
    grey = buffer[:stride * lowresSize[1]].reshape((lowresSize[1], stride))
    found_right = InferenceTensorFlow(grey, args.model, output_file, 1, label_file)

    set_motors(found_left, found_right)