Skip to content

Instantly share code, notes, and snippets.

@xictus77
Created January 17, 2022 13:25
Show Gist options
  • Save xictus77/9fd7592764c8b999371ba5bf7ca3dd24 to your computer and use it in GitHub Desktop.
Main gist of obstruction detection
# Main processing loop: read frames, diff against the first (reference) frame,
# track moving contours with a centroid tracker, and raise an alert when a
# tracked object dwells inside the ROI polygon longer than `allowable_duration`.
# Relies on module-level state set up earlier in the file: vs, args, polygon,
# points_resized, ct, object_id_list, dtime, dwell_time, allowable_duration,
# firstFrame, W, H, total_frames, out, out_det, and the color constants.
while True:
    # Grab the current frame; VideoCapture sources yield (grabbed, frame)
    # tuples, so take element [1] when reading from a video file.
    frame = vs.read()
    status = 0
    frame = frame if args.get("video", None) is None else frame[1]
    text = "CLEAR FROM OBSTRUCTION"
    textcolor = GREEN

    # A None frame means the video stream has ended.
    if frame is None:
        break

    # Draw the ROI polygon outline on the frame.
    frame = cv2.polylines(frame, polygon, True, (255, 0, 0), thickness=1)

    # Resize the frame, convert it to grayscale, and blur it to suppress
    # high-frequency noise before frame differencing.
    frame = imutils.resize(frame, width=960)
    total_frames += 1
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # Record the frame dimensions once (used by the centroid tracker setup).
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # The very first frame becomes the static reference image; nothing to
    # diff against yet, so skip to the next iteration.
    if firstFrame is None:
        firstFrame = gray
        continue

    # Absolute difference against the reference frame, then threshold and
    # dilate to obtain solid foreground blobs.
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Bounding rectangles (x1, y1, x2, y2) fed to the centroid tracker.
    rects = []

    # Keep only contours large enough to matter and collect their boxes.
    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        status = 1
        (x, y, w, h) = cv2.boundingRect(c)
        # Convert (x, y, w, h) to corner form for the tracker / NMS.
        rects.append((x, y, x + w, y + h))
        text = "OBJECTS DETECTED"
        textcolor = WHITE

    # Merge overlapping boxes, then update the centroid tracker; `objects`
    # maps objectId -> bounding box.
    boundingboxes = np.array(rects)
    boundingboxes = boundingboxes.astype(int)
    rects = non_max_suppression_fast(boundingboxes, 0.3)
    objects = ct.update(rects)

    for (objectId, bbox) in objects.items():
        x1, y1, x2, y2 = bbox
        x1 = int(x1)
        y1 = int(y1)
        x2 = int(x2)
        y2 = int(y2)

        # ROI membership of the box's top-left corner: 1 inside, 0 on the
        # contour, -1 outside. NOTE(review): only (x1, y1) is tested, so a
        # box straddling the ROI edge may be classified as outside — confirm
        # this is intended.
        in_roi_id = cv2.pointPolygonTest(points_resized, (x1, y1), False)

        if objectId not in object_id_list:
            # First sighting: start this object's dwell-time clock.
            object_id_list.append(objectId)
            dtime[objectId] = datetime.now()
            dwell_time[objectId] = 0
        else:
            # Accumulate the wall-clock seconds since the last sighting.
            curr_time = datetime.now()
            old_time = dtime[objectId]
            time_diff = curr_time - old_time
            dtime[objectId] = datetime.now()
            sec = time_diff.total_seconds()
            dwell_time[objectId] += sec

        # Static obstruction: the object has lingered in (or on) the ROI
        # longer than the allowed duration — annotate, snapshot, and record.
        if dwell_time[objectId] >= allowable_duration and in_roi_id >= 0:
            cv2.rectangle(frame, (x1, y1), (x2, y2), RED, 2)
            text = "ALERT! STATIC OBSTRUCTION DETECTED"
            textcolor = RED
            text_id_time = "ID:{}| Time:{}".format(objectId, int(dwell_time[objectId]))
            cv2.putText(frame, text_id_time, (x2, y2), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, RED, 1)
            cv2.imwrite('detected_ID_' + str(objectId) + '.jpeg', frame)
            out_det.write(frame)

    # Draw the status text and timestamp on the frame.
    cv2.putText(frame, "Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 2)
    cv2.putText(frame, datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, BLACK, 1)

    # Show the annotated frame plus debug views, and record the output.
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    out.write(frame)

    key = cv2.waitKey(30) & 0xFF
    # 'q' quits; 'p' pauses until any key is pressed.
    if key == ord('q'):
        break
    if key == ord('p'):
        cv2.waitKey(-1)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment