Skip to content

Instantly share code, notes, and snippets.

@piyh
Last active July 16, 2016 03:45
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save piyh/dd3721e560a4db5caa4bb00e88e794bb to your computer and use it in GitHub Desktop.
import picamera
import datetime
import json
import time
import numpy
import cv2
import sys
from subprocess import call
import os
import signal
from PIL import Image
import io
class ChiWatcher:
    """Motion-activated recorder for a Raspberry Pi camera.

    Keeps the last ~10 seconds of video in an in-memory circular buffer.
    When motion is detected, the buffered pre-motion footage is flushed to
    disk and the live recording is split to a second file; when motion
    ends, the two h264 clips are concatenated and muxed into an .mkv via
    the external MP4Box tool.
    """

    def __init__(self):
        # Load the configuration (resolution, fps, codec, motion thresholds).
        self.conf = json.load(open('conf.json'))
        # Initialize state.
        self.prior_image = None
        self.avg = None  # running background average used by detectMotion()
        self.motionCounter = 0
        # Start at 10 so we assume the program begins without motion.
        self.motionlessCounter = 10
        self.motionEvent = False
        self.videoWritten = False
        self.frame = None
        self.stopped = False  # set by stopStream() to end the capture loop
        # Capture interrupt and term signals for a clean shutdown.
        signal.signal(signal.SIGINT, self.stopStream)
        signal.signal(signal.SIGTERM, self.stopStream)

    def start(self):
        """Run the capture/motion-detection loop until a signal stops it."""
        with picamera.PiCamera() as self.camera:
            self.camera.resolution = tuple(self.conf["resolution"])
            self.camera.framerate = self.conf["fps"]
            self.camera.video_stabilization = True
            self.camera.vflip = True
            time.sleep(2)  # give the sensor time to warm up and settle
            self.stream = picamera.PiCameraCircularIO(self.camera, seconds=10)
            self.camera.start_recording(self.stream, format=self.conf["codec"])
            while not self.stopped:
                if self.motionEventCalc():
                    if self.videoWritten is False:
                        print("Writing video")
                        ts = datetime.datetime.now().strftime("%Y%b%d-%H%M%S")
                        # Flush the pre-motion circular buffer to disk...
                        self.write_video(self.stream,
                                         '/home/pi/camera/' + ts + 'b.h264')
                        # ...and record the motion itself to a separate file.
                        self.camera.split_recording(
                            '/home/pi/camera/' + ts + 'a.h264')
                        self.videoWritten = True
                else:
                    if self.videoWritten:
                        print('Video stopped')
                        self.camera.split_recording(self.stream)
                        self.videoWritten = False
                        self._mux_clips(ts)
            # Loop exited via stopStream(): shut the camera down cleanly.
            self.camera.stop_recording()

    def _mux_clips(self, ts):
        """Concatenate the pre-motion ('b') and motion ('a') h264 clips for
        timestamp *ts*, mux the result into an .mkv, and remove the
        intermediate files.

        Commands are passed as argument lists (shell=False) so the
        filenames are never interpreted by a shell.
        """
        base = '/home/pi/camera/' + ts
        # Concatenate the two h264 clips into one raw stream.
        call(['MP4Box', '-cat', base + 'b.h264', '-cat', base + 'a.h264',
              base + '.h264'])
        os.remove(base + 'b.h264')
        os.remove(base + 'a.h264')
        # Mux the raw h264 into an mkv container.
        call(['MP4Box', '-add', base + '.h264', base + '.mkv'])
        os.remove(base + '.h264')

    def write_video(self, stream, filename):
        """Write the entire content of the circular buffer *stream* to
        *filename*, then wipe the buffer.

        No need to lock the stream here as we're definitely not writing
        to it simultaneously.
        """
        with io.open(filename, 'wb') as output:
            # Rewind to the first SPS header so the clip starts on a
            # decodable frame boundary.
            for frame in stream.frames:
                if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                    stream.seek(frame.position)
                    break
            while True:
                buf = stream.read1()
                if not buf:
                    break
                output.write(buf)
        # Wipe the circular stream once we're done.
        stream.seek(0)
        stream.truncate()

    def motionEventCalc(self):
        """Count consecutive frames with/without motion and return whether a
        motion event is currently in progress."""
        if self.detectMotion():
            self.motionCounter += 1
            self.motionlessCounter = 0
        else:
            self.motionlessCounter += 1
        # Enough consecutive motion frames: mark the event flag so start()
        # begins a new recording clip.
        if self.motionCounter == self.conf["min_motion_frames"]:
            # Bump past the threshold so the next motion frame does not
            # retrigger this branch (needed if an intervening frame had no
            # motion).
            self.motionCounter += 1
            self.motionEvent = True
            print("motion event start")
        # Enough consecutive motionless frames: end the event.
        if self.motionlessCounter == self.conf["min_motion_frames"]:
            if self.motionEvent:
                print("motion event end")
            self.motionEvent = False
            self.motionCounter = 0
        # DEBUG
        # print("Motionless Frames: " + str(self.motionlessCounter) +
        #       " Motion Frames: " + str(self.motionCounter))
        return self.motionEvent

    def detectMotion(self):
        """Capture a still frame, compare it against a running background
        average, and return True when a big-enough region changed."""
        motionInFrame = False
        current_image = io.BytesIO()
        # Grab a downscaled JPEG from the video port (does not interrupt
        # the h264 recording).
        self.camera.capture(current_image, format='jpeg', resize=(960, 540),
                            use_video_port=True)
        current_image = numpy.array(Image.open(current_image))
        current_image = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
        current_image = cv2.GaussianBlur(current_image, (21, 21), 0)
        if self.avg is None:
            # First frame seeds the background model; report no motion.
            print("[INFO] starting background model...")
            self.avg = current_image.copy().astype("float")
            return False
        # Accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and the running average.
        cv2.accumulateWeighted(current_image, self.avg, 0.5)
        frameDelta = cv2.absdiff(current_image, cv2.convertScaleAbs(self.avg))
        # Threshold the delta image, dilate the thresholded image to fill in
        # holes, then find contours on the thresholded image.
        thresh = cv2.threshold(frameDelta, self.conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # NOTE(review): 3-value unpacking matches OpenCV 3.x only; OpenCV
        # 2.x/4.x return two values from findContours — confirm target version.
        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            # If a contour meets the size threshold, chi is on the move.
            if cv2.contourArea(c) >= self.conf["min_area"]:
                motionInFrame = True
        return motionInFrame

    def stopStream(self, signum, frame):
        """SIGINT/SIGTERM handler: flag the main loop in start() to exit so
        the camera is shut down cleanly.

        Fixes the old TODO: the loop tests self.stopped, but this handler
        previously set self.stream.stopped (an attribute nothing reads)
        and force-exited without stopping the recording.
        """
        print("recieved sigint or sigterm")
        self.stopped = True
if __name__ == '__main__':
    # Entry point: build the watcher and run its capture loop.
    watcher = ChiWatcher()
    watcher.start()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment