Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
A collection of small camera demos for Picademy — see https://github.com/waveform80/picamera_demos for updated versions.
import picamera
import numpy as np
from picamera.array import PiRGBAnalysis
from picamera.color import Color
class MyColorAnalyzer(PiRGBAnalysis):
    """Frame analyzer that labels the dominant color of a central box.

    Each frame's pixels in rows 30:60, columns 60:120 are averaged per
    channel, converted to HLS, and classified as 'red', 'blue',
    'green', or 'none'; the preview annotation is updated whenever the
    classification changes.
    """

    def __init__(self, camera):
        super(MyColorAnalyzer, self).__init__(camera)
        # Remember the previous label so annotate_text is only written
        # when the detected color actually changes.
        self.last_color = 'none'

    def analyse(self, a):
        # Average color of the watched region: one mean per channel.
        region = a[30:60, 60:120, :]
        avg = Color(
            r=int(np.mean(region[..., 0])),
            g=int(np.mean(region[..., 1])),
            b=int(np.mean(region[..., 2]))
        )
        hue, lightness, saturation = avg.hls
        # Only a sufficiently saturated patch is classified; each hue
        # band below picks out one primary color.
        label = 'none'
        if saturation > 1/3:
            if hue > 8/9 or hue < 1/36:
                label = 'red'
            elif 5/9 < hue < 2/3:
                label = 'blue'
            elif 5/36 < hue < 4/9:
                label = 'green'
        # If the color has changed, update the display
        if label != self.last_color:
            self.camera.annotate_text = label
            self.last_color = label
# Drive the color analyzer: preview with a marker box over the watched
# region, and stream raw RGB frames into MyColorAnalyzer.
with picamera.PiCamera() as camera:
    camera.resolution = (160, 90)
    camera.framerate = 24
    # Fix the camera's white-balance gains so the measured color is stable
    camera.awb_mode = 'off'
    camera.awb_gains = (1.4, 1.5)
    # Draw a box over the area we're going to watch
    camera.start_preview(alpha=128)
    # NOTE(review): the buffer is 96 rows for a 90-row display —
    # presumably padding to the encoder's block size; confirm against
    # the picamera overlay documentation.
    box = np.zeros((96, 160, 3), dtype=np.uint8)
    box[30:60, 60:120, :] = 0x80
    camera.add_overlay(memoryview(box), size=(160, 90), layer=3, alpha=64)
    # Construct the analysis output and start recording data to it
    with MyColorAnalyzer(camera) as color_analyzer:
        camera.start_recording(color_analyzer, 'rgb')
        try:
            while True:
                camera.wait_recording(1)
        finally:
            # Fix: without try/finally, stop_recording() was unreachable
            # and Ctrl-C left the recording running; the other demos in
            # this file already guard the loop this way.
            camera.stop_recording()
import os
import numpy as np
import picamera
from picamera.array import PiMotionAnalysis
# Tuning knobs for the gesture detector below.
QUEUE_SIZE = 10 # the number of consecutive frames to analyze
THRESHOLD = 4.0 # the minimum average motion required in either axis
class MyGestureDetector(PiMotionAnalysis):
    """Motion analyzer that reports gross left/right/up/down movement.

    Keeps a rolling window of the mean x and y motion-vector components
    over the last QUEUE_SIZE frames and annotates the preview with the
    direction(s) whose windowed mean exceeds THRESHOLD.
    """

    def __init__(self, camera):
        super(MyGestureDetector, self).__init__(camera)
        # Fix: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float yields the same float64 dtype.
        self.x_queue = np.zeros(QUEUE_SIZE, dtype=float)
        self.y_queue = np.zeros(QUEUE_SIZE, dtype=float)

    def analyse(self, a):
        # Roll the queues and overwrite the first element with a new
        # mean (equivalent to pop and append)
        self.x_queue = np.roll(self.x_queue, 1)
        self.y_queue = np.roll(self.y_queue, 1)
        self.x_queue[0] = a['x'].mean()
        self.y_queue[0] = a['y'].mean()
        # Calculate the mean of both queues
        x_mean = self.x_queue.mean()
        y_mean = self.y_queue.mean()
        # Convert left/up to -1, right/down to 1, and movement below
        # the threshold to 0
        x_move = ('' if abs(x_mean) < THRESHOLD else 'left' if x_mean < 0.0 else 'right')
        y_move = ('' if abs(y_mean) < THRESHOLD else 'up' if y_mean < 0.0 else 'down')
        # Update the display
        self.camera.annotate_text = '%s %s' % (x_move, y_move)
# Record H.264 to the null device purely so the encoder produces
# motion vectors, which are fed to the gesture detector above.
with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.framerate = 24
    with MyGestureDetector(camera) as gesture_detector:
        # No video output is wanted; only motion_output is consumed.
        camera.start_recording(
            os.devnull, format='h264', motion_output=gesture_detector)
        try:
            while True:
                # Wake once a second so recording errors surface.
                camera.wait_recording(1)
        finally:
            # Always stop cleanly, even on Ctrl-C.
            camera.stop_recording()
import os
import picamera
import numpy as np
from picamera.array import PiMotionAnalysis
# A simple demo of sub-classing PiMotionAnalysis to construct a motion detector
# Tuning knobs for the motion detector below.
MOTION_MAGNITUDE = 60 # the magnitude of vectors required for motion
MOTION_VECTORS = 10 # the number of vectors required to detect motion
class MyMotionDetector(PiMotionAnalysis):
    """Motion analyzer that prints a message when enough vectors move.

    A frame counts as motion when more than MOTION_VECTORS of its
    motion vectors have a magnitude above MOTION_MAGNITUDE.
    """

    def analyse(self, a):
        # Calculate the magnitude of all vectors with pythagoras' theorem.
        # Fix: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float yields the same float64 dtype.
        magnitudes = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # Count the number of vectors with a magnitude greater than our
        # threshold
        vector_count = (magnitudes > MOTION_MAGNITUDE).sum()
        if vector_count > MOTION_VECTORS:
            print('Detected motion!')
# Record H.264 to the null device purely so the encoder produces
# motion vectors, which are fed to the motion detector above.
with picamera.PiCamera() as camera:
    camera.resolution = (1280, 720)
    camera.framerate = 24
    with MyMotionDetector(camera) as motion_detector:
        # No video output is wanted; only motion_output is consumed.
        camera.start_recording(
            os.devnull, format='h264', motion_output=motion_detector)
        try:
            while True:
                # Wake once a second so recording errors surface.
                camera.wait_recording(1)
        finally:
            # Always stop cleanly, even on Ctrl-C.
            camera.stop_recording()
import os
import numpy as np
import picamera
from picamera.array import PiMotionAnalysis
# Tuning knobs for the speed detector below.
QUEUE_SIZE = 10 # the number of consecutive frames to analyze
THRESHOLD = 100.0 # the minimum average magnitude of vectors across the frames
class MySpeedDetector(PiMotionAnalysis):
    """Motion analyzer that flags fast average movement across frames.

    Averages the x and y motion-vector components over the last
    QUEUE_SIZE frames and prints a warning when the magnitude of that
    averaged motion exceeds THRESHOLD.
    """

    def __init__(self, camera):
        super(MySpeedDetector, self).__init__(camera)
        # Fix: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float yields the same float64 dtype.
        self.x_queue = np.zeros(QUEUE_SIZE, dtype=float)
        self.y_queue = np.zeros(QUEUE_SIZE, dtype=float)

    def analyse(self, a):
        self.x_queue = np.roll(self.x_queue, 1)
        self.y_queue = np.roll(self.y_queue, 1)
        # If you want to detect objects that don't fill the camera's view,
        # limit the vectors that are considered here
        self.x_queue[0] = a['x'].mean()
        self.y_queue[0] = a['y'].mean()
        x_mean = self.x_queue.mean()
        y_mean = self.y_queue.mean()
        # Fix: the original computed x**2 + (y**2)**0.5 — the square
        # root bound only to the y term. Take the root of the whole sum
        # (Euclidean magnitude).
        avg_speed = ((x_mean ** 2) + (y_mean ** 2)) ** 0.5
        # Fix: compare against the named THRESHOLD instead of a
        # duplicated magic 100.0, so the constant above takes effect.
        if avg_speed > THRESHOLD:
            print('Speeder detected: %.2f' % avg_speed)
# Record H.264 to the null device purely so the encoder produces
# motion vectors, which are fed to the speed detector above.
with picamera.PiCamera() as camera:
    # Use the highest framerate possible, with a high enough resolution to
    # make out fast moving objects
    camera.resolution = (640, 480)
    camera.framerate = 90
    with MySpeedDetector(camera) as speed_detector:
        # No video output is wanted; only motion_output is consumed.
        camera.start_recording(
            os.devnull, format='h264', motion_output=speed_detector)
        try:
            while True:
                # Wake once a second so recording errors surface.
                camera.wait_recording(1)
        finally:
            # Always stop cleanly, even on Ctrl-C.
            camera.stop_recording()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment