@KevinPatel04
Created March 30, 2020 16:24
import argparse
import datetime
import imutils
import math
import cv2
import numpy as np
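# the imports above rely on the third-party packages opencv-python, imutils
# and numpy (installable with e.g. `pip install opencv-python imutils numpy`)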
width = 800
textIn = 0
textOut = 0
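# textIn / textOut accumulate the number of detected entries / exits,
# while total (below) tracks how many people are currently inside the hall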
total = 0
# capacity of the hall; set to 1 by default for this demo
THRESHOLD = 1
# check whether the person has moved outside
def testIntersectionOut(x, y):
    res = -450 * x + 400 * y + 157500
    if (res >= -500) and (res < 550):
        print(str(res))
        return True
    return False
# check whether the person has moved inside
def testIntersectionIn(x, y):
    res = -450 * x + 400 * y + 180000
    if (res >= -500) and (res <= 550):
        print(str(res))
        return True
    return False
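# The hard-coded numbers in the two tests above are signed line equations:
# for a line through points (x1, y1) and (x2, y2), the expression
#     (y2 - y1) * x - (x2 - x1) * y + (x2 * y1 - x1 * y2)
# is zero exactly on the line and changes sign from one side to the other,
# so a value inside a small band around zero means the centre point is
# sitting on the boundary. A hypothetical helper (illustration only, not
# used below) that builds such a test from a boundary line's two endpoints:
def lineSide(p1, p2, x, y):
    # signed value of (x, y) relative to the line through p1 and p2
    (x1, y1), (x2, y2) = p1, p2
    return (y2 - y1) * x - (x2 - x1) * y + (x2 * y1 - x1 * y2)
# With width = 800, lineSide((width // 2, 0), (width, 450), x, y) evaluates to
# 450 * x - 400 * y - 180000, i.e. minus the expression in testIntersectionIn,
# and lineSide((width // 2 - 50, 0), (width - 50, 450), x, y) evaluates to
# 450 * x - 400 * y - 157500, i.e. minus the expression in testIntersectionOut.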
if __name__ == "__main__":
    # set the test video path here
    # download the test video from https://drive.google.com/file/d/1935x6wqD0qjPM3marxC-bihZFLafZBi3/view?usp=sharing
    PATH_TO_STREAM = "test2.mp4"
    # initialize the stream
    camera = cv2.VideoCapture(PATH_TO_STREAM)
    # initialize the first frame to None by default
    firstFrame = None
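    # the first frame read below is kept as a static background model; every
    # later frame is compared against it by absolute differencing, which
    # assumes a fixed camera and an initially empty scene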
    # loop over the frames of the video
    while True:
        # grab the current frame from the stream and initialize the
        # occupied/unoccupied text
        (grabbed, frame) = camera.read()
        text = "Unoccupied"
        # if the frame could not be grabbed, then we have reached the end
        # of the video
        if not grabbed:
            break
        # draw the IN / OUT side labels
        cv2.putText(frame, "IN", (30, 200),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        cv2.putText(frame, "OUT", (700, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        # resize the frame, convert it to grayscale, and blur it with a
        # 21 x 21 Gaussian kernel
        frame = imutils.resize(frame, width=width)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
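        # the blur suppresses high-frequency camera noise so that the frame
        # differencing below does not trigger on single-pixel flicker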
        # if the first frame is None, initialize it
        if firstFrame is None:
            firstFrame = gray
            continue
        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        # dilate the thresholded image twice to fill in holes, then find contours
        # on the thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
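        # cv2.findContours returns (contours, hierarchy) on OpenCV 4.x but
        # (image, contours, hierarchy) on OpenCV 3.x; imutils.grab_contours
        # extracts the contour list in either case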
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 12000:
                continue
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # draw the lines that mark the boundary separating IN and OUT
            cv2.line(frame, (width // 2, 0), (width, 450), (250, 0, 1), 2)  # blue line (OUT)
            cv2.line(frame, (width // 2 - 50, 0), (width - 50, 450), (0, 0, 255), 2)  # red line (IN)
            # draw the centre point of the contour's bounding box
            rectangleCenterPoint = ((x + x + w) // 2, (y + y + h) // 2)
            cv2.circle(frame, rectangleCenterPoint, 1, (0, 0, 255), 5)
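            # the same centre point is what the two intersection tests below
            # receive, so a crossing is registered the moment the bounding-box
            # centre lies on one of the diagonal boundary lines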
            # if the person is moving out, increment the textOut counter and
            # decrement the total counter
            if testIntersectionOut((x + x + w) // 2, (y + y + h) // 2):
                textOut += 1
                total -= 1
            # if the person is moving in, increment the textIn counter and
            # increment the total counter
            if testIntersectionIn((x + x + w) // 2, (y + y + h) // 2):
                textIn += 1
                total += 1
        # check whether the capacity threshold has been reached and draw the
        # corresponding label
        if total >= THRESHOLD:
            cv2.putText(frame, "Hall is: Full", (10, 120),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if total < THRESHOLD:
            cv2.putText(frame, "Hall is: Open", (10, 120),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # overlay the running counters and the current timestamp
        cv2.putText(frame, "Total In: {}".format(str(textIn)), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, "Total Out: {}".format(str(textOut)), (10, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, "Current In: {}".format(str(total)), (10, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.imshow("Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # clean up: release the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
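# Note: to point the counter at a live webcam instead of the sample video,
# cv2.VideoCapture can be given a device index rather than a file path,
# e.g. camera = cv2.VideoCapture(0); the rest of the loop stays the same.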