Created
April 23, 2018 21:28
-
-
Save aallan/3ea534f1aaf550b7f9e6895c0d0c216f to your computer and use it in GitHub Desktop.
Using the AIY Projects Vision Kit and a micro-servo to build a face-tracking dinosaur. See https://medium.com/@aallan/the-do-you-think-he-saurs-f1fdbc8846de.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
import argparse | |
from aiy.vision.inference import CameraInference | |
from aiy.vision.models import face_detection | |
from examples.vision.annotator import Annotator | |
from picamera import PiCamera | |
from gpiozero import Servo | |
from aiy.vision.pins import PIN_A | |
def main():
    """Track the nearest detected face with a servo on the AIY Vision Kit.

    Runs on-device face detection over the camera feed, draws bounding
    boxes in the preview overlay, and nudges a micro-servo left or right
    to keep the first detected face near the horizontal center of the
    frame. Re-centers the servo after a sustained period with no faces.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera() as camera:
        # Sensor mode 4 gives the full field of view at 1640x1232.
        camera.sensor_mode = 4
        camera.resolution = (1640, 1232)
        camera.framerate = 30
        camera.start_preview()

        # Pulse widths tuned for this particular micro-servo; value range
        # maps to [-1, 1] via gpiozero.
        servo = Servo(PIN_A, min_pulse_width=.0005, max_pulse_width=.0019)
        servo.mid()
        position = 0       # current servo position in [-1, 1]
        zero_counter = 0   # consecutive frames with no face detected

        # The preview annotator renders at a reduced resolution; scale
        # inference coordinates down to match it.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        def transform(bounding_box):
            # Convert an (x, y, width, height) box in sensor coordinates
            # into (x1, y1, x2, y2) in annotator coordinates.
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box), fill=0)
                annotator.update()
                print('Iteration #%d: num_faces=%d' % (i, len(faces)))
                if faces:
                    # Fix: a face is visible again, so restart the no-face
                    # countdown. The original never reset this, letting
                    # faceless frames accumulate across separate gaps and
                    # trigger the re-center path prematurely.
                    zero_counter = 0
                    face = faces[0]
                    x, y, width, height = face.bounding_box
                    print(' : Face is at %d' % x)
                    if x < 300:
                        print(' : Face left of center')
                        position = position - 0.1
                        if position < -1:
                            position = -0.99
                    elif x > 500:
                        print(' : Face right of center')
                        position = position + 0.1
                        if position > 1:
                            position = 0.99
                    else:
                        print(' : Face in CENTER of image')
                        # Fix: the original assigned to a misspelled
                        # 'positon' here — a harmless no-op. When the face
                        # is centered, the servo simply holds its position.
                    servo.value = position
                else:
                    zero_counter = zero_counter + 1
                    # After 100 consecutive faceless frames, give up and
                    # return the servo to center.
                    if zero_counter == 100:
                        servo.mid()
                        position = 0
                        print(' :Ignoring you')
                        zero_counter = 0
        camera.stop_preview()
# Run the tracker only when this file is executed directly, not on import.
if __name__ == '__main__':
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Note: the AIY module paths have changed since this gist was written.
`examples.vision.annotator` is now `aiy.vision.annotator`,
and `aiy.vision.pins` is now `aiy.pins`.