Created
May 19, 2017 14:53
-
-
Save smeschke/50bd3a9ecee3cdbd631662d4e9b97d6b to your computer and use it in GitHub Desktop.
Box tracking: detects blue boxes in a fixed detection area, follows each one with Lucas-Kanade optical flow, and records which pallet (A or B) it ends up on.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import cv2 | |
import numpy as np | |
# Video to process and the tracking state shared by the functions below.
cap = cv2.VideoCapture('/home/stephen/Desktop/fedex.avi')
# Lower (h,s,v) and upper (h1,s1,v1) HSV bounds -- values for the blue boxes
h,s,v,h1,s1,v1 = 104,0,134,133,255,255
# Kernel size for the morphological open used in only_color
morph = 1
last_box_depart = -10000 #frame number that the last box departed
# True while a box is currently being followed by optical flow
tracking_box = False
# NOTE(review): box_ready is read in the main loop but never set True anywhere
box_ready = False
def only_color(frame, hsv_range, morph):
    """Isolate the pixels of `frame` whose HSV values fall inside a range.

    hsv_range -- 6-tuple (h, s, v, h1, s1, v1): lower then upper HSV bounds.
    morph     -- square kernel size for the morphological open (noise removal).
    Returns (res, mask): the masked BGR image and the binary mask.
    """
    # NOTE(review): the original signature used Python-2-only tuple
    # parameter unpacking (removed by PEP 3113) and misleading b/r/g
    # names -- the bounds are applied in HSV space, not BGR. Callers
    # already pass a tuple, so unpacking inside is backward compatible.
    h, s, v, h1, s1, v1 = hsv_range
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Define the inclusive HSV range to keep
    lower = np.array([h, s, v])
    upper = np.array([h1, s1, v1])
    # Threshold the HSV image to get only the target color
    mask = cv2.inRange(hsv, lower, upper)
    # Morphological open with a morph x morph kernel removes small speckle
    kernel = np.ones((morph, morph), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    return res, mask
# Pick out the biggest contour (by area) from a list of contours and
# return its FIRST point array (shape (1, 2)) -- despite the name, this
# is a representative point, not the whole contour; the caller in
# is_new_box relies on exactly that shape to read off an (x, y) location.
def largest_contour(contours):
    biggest = max(contours, key=cv2.contourArea)
    return biggest[0]
#takes an image and the threshold value, returns the contours
def get_contours(im, threshold_value):
    """Binary-threshold a BGR image and return its contour list.

    im              -- BGR image.
    threshold_value -- gray level used for the binary threshold.
    """
    imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(imgray, threshold_value, 255, 0)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
    # but (contours, hierarchy) in 2.x and 4.x; indexing [-2] yields the
    # contour list under every version (identical value on 3.x, where the
    # original 3-way unpack worked).
    contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    return contours
# Parameters for Lucas-Kanade sparse optical flow (cv2.calcOpticalFlowPyrLK):
# 10x10 search window, iterate until 10 iterations or epsilon 0.03.
# NOTE(review): maxLevel is the number of pyramid levels; 20000 is far beyond
# any sensible value (typically 2-4) -- looks like a typo, confirm intent.
lk_params = dict(winSize = (10,10),
                 maxLevel = 20000,
                 criteria = (cv2.TERM_CRITERIA_EPS |
                             cv2.TERM_CRITERIA_COUNT, 10, 0.03))
#finds the center of a contour
#takes a single contour
#returns (x,y) position of the contour
def contour_center(c):
    """Return the (x, y) centroid of a single contour via image moments.

    Falls back to (0, 0) for a degenerate contour whose area moment
    m00 is zero (which would otherwise divide by zero).
    """
    M = cv2.moments(c)
    try:
        center = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
    except ZeroDivisionError:
        # Zero-area contour: no meaningful centroid.
        # (The original bare `except:` also hid any other error; division
        # by zero is the only failure this computation actually produces.)
        center = 0, 0
    return center
#check and see if a box occupies the box detection area
def is_new_box(frame):
    """Return (True, (x, y)) if a box occupies the detection area, else (False, False).

    The (x, y) position is in full-frame coordinates. Reads the
    module-level HSV bounds h, s, v, h1, s1, v1.
    """
    # Select roi: the "Box Detection Area" rectangle drawn in the main loop
    roi = frame[150:450, 825:1025]
    res, mask = only_color(roi, (h, s, v, h1, s1, v1), 1)
    # Enough lit mask pixels means a blue box is inside the ROI
    # (kept as builtin sum(sum(...)) to match original numeric behavior)
    if sum(sum(mask)) > 6000:
        # Original recomputed get_contours + largest_contour twice (four
        # redundant contour passes); compute the representative point once.
        point = largest_contour(get_contours(res, 0))[0]
        # Offset the ROI-relative point back into full-frame coordinates
        ccc = point[0] + 825, point[1] + 150
        return True, ccc
    else:
        return False, False
frame_number = 0
#list of (index, pallet_letter) records, one per box that reached a pallet
boxes = []
font = cv2.FONT_HERSHEY_SIMPLEX

#main loop of the program
while True:
    #read image from the video
    _, img = cap.read()
    #try/except to see if video is over, and convert frame to gray
    #(first iteration: img_gray does not exist yet, so old_gray is seeded
    # from the current frame instead of the previous one)
    try: old_gray = img_gray.copy()
    except: old_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #cvtColor raises when cap.read() returned no frame => video is over
    try: img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except: break

    if tracking_box:
        #track the point from the previous frame using LK optical flow
        p1, error, _ = cv2.calcOpticalFlowPyrLK(old_gray, img_gray,
                                                p0, None, **lk_params)
        #convert the coordinates to two integers
        xy = int(p1[0][0][0]), int(p1[0][0][1])
        #update p0 to p1 so the next frame tracks from the new position
        p0 = p1
        #make a circle around the object being tracked
        cv2.circle(img, xy, 45, (244,4,4), 6)

    #if no box is ready and it has been 100 frames since the last box
    #NOTE(review): last_box_depart is never updated after init (-10000) and
    #box_ready is never set True, so this condition is always satisfied --
    #presumably leftover debounce logic; confirm intent.
    if box_ready==False and frame_number-last_box_depart > 100:
        #check and see if there is a new box in the detection area
        new_box = is_new_box(img)

    #draw rectangles and put text to show where areas are in the frame
    pallet_a = (130,60),(475,350)
    pallet_b = (130,410),(475,700)
    cv2.rectangle(img, pallet_a[0], pallet_a[1], (0,255,0),3)
    cv2.putText(img, 'Pallet A', (200, 50), font, 1.0, (0,255,0), 2, cv2.LINE_AA)
    cv2.rectangle(img, pallet_b[0], pallet_b[1], (0,0,255),3)
    cv2.putText(img, 'Pallet B', (200,400), font, 1.0, (0,0,255), 2, cv2.LINE_AA)
    #NOTE(review): duplicate of the pallet_b rectangle two lines up
    cv2.rectangle(img, pallet_b[0], pallet_b[1], (0,0,255),3)
    bda = (850,150), (1025,420)
    cv2.putText(img, 'Box Detection Area', (800,120), font, 1.0, (0,0,0), 2, cv2.LINE_AA)
    cv2.rectangle(img, bda[0], bda[1], (0,0,0),6)

    #check and see if the box has gone into the pallet areas
    #if a box is being tracked
    if tracking_box:
        #if the box has passed leftward into the pallet areas (x < 350)
        if xy[0]<350:
            #upper area => pallet A
            if xy[1]<400:
                print 'box has entered pallet a', xy
                tracking_box = False
                boxes.append((len(boxes), 'A'))
            else:
                #lower area => pallet B
                print 'box has entered pallet b'
                boxes.append((len(boxes), 'B'))
                tracking_box = False

    #overlay the running list: one line per box with the pallet it reached
    bb = 0
    for box in boxes:
        bb+=1
        cv2.putText(img, str(box[0])+' '+box[1], (10,45+45*bb), font, 1.2, (255,255,255), 2, cv2.LINE_AA)

    #show the image and wait; ESC (27) quits early
    cv2.imshow('img', img)
    k=cv2.waitKey(1)
    if k==27: break

    #if no box is currently being tracked
    if not tracking_box:
        #check and see if there has been a new box in the detection area
        if new_box[0]==True:
            print 'Found new box at:', new_box[1]
            tracking_box = True
            #seed the optical-flow point (shape (1,1,2) float32) with the
            #box's detected position
            p0 = np.array([[new_box[1]]], np.float32)
    #increment frame number
    frame_number += 1

#release the video to avoid memory leaks, and close the window
cap.release()
cv2.destroyAllWindows()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment