Created May 28, 2016 13:31
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
#import subprocess
#subprocess.call("sudo modprobe bcm2835-v4l2")
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
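# note: argparse exposes "--min-area" as args["min_area"] (hyphens are converted to underscores)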
# return True if the tracked x coordinates decreased overall, i.e. the motion moved right-to-left
def detectDirection(motionvecs):
    if len(motionvecs) != 0:
        return (motionvecs[-1][0] - motionvecs[0][0]) < 0
    return 0
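# e.g. detectDirection([[200, 80], [150, 82], [90, 85]]) -> True (x went from 200 to 90, so the motion moved leftward)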
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
motionvecs = []

# read and discard a couple of frames, pausing between them,
# presumably to give the camera time to settle
(g, f) = camera.read()
time.sleep(1)
(a, b) = camera.read()
time.sleep(1)
# loop over the frames of the video
text = 'Unoccupied'
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    (grabbed, frame) = camera.read()

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break
    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        time.sleep(5)
        continue
    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image; grab_contours handles the differing return
    # signatures of cv2.findContours across OpenCV versions
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    if len(cnts) > 0:
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < args["min_area"]:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"
            motionvecs.append([x, y])
    else:
        # no motion in this frame: if the room was previously occupied, report
        # the direction of the motion that just ended and reset the state
        if text == 'Occupied':
            print(detectDirection(motionvecs))
            motionvecs = []
            text = 'Unoccupied'
    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# clean up the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
I tried to pass a video.mp4, but it is always reading from the webcam source.
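A likely cause, judging from the argument parser above: the video path has to be passed explicitly with the -v/--video flag, otherwise args["video"] is None and the script falls back to cv2.VideoCapture(0), i.e. the webcam. Assuming the script is saved as motion_detector.py (hypothetical filename), the invocation would look something like:

python motion_detector.py --video video.mp4 --min-area 500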