Skip to content

Instantly share code, notes, and snippets.

@jatinkrmalik
Last active October 6, 2017 08:20
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save jatinkrmalik/083733840a0d1836063bbfd96c98c514 to your computer and use it in GitHub Desktop.
Face detection using DLIB and OpenCV3
#!/usr/bin/python
import os
import tempfile
import time

from flask import Response

from modules.faceOnboarding import app
from modules.faceOnboarding import service
@app.app.route('/video_feed')
def video_feed():
    """Video streaming route.

    Put this URL in the ``src`` attribute of an ``<img>`` tag; the
    multipart/x-mixed-replace response streams JPEG frames from gen().
    """
    mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(), mimetype=mimetype)
@app.app.route('/stop_feed')
def stop_feed():
    """Stop-feed endpoint.

    The original returned ``None``, which makes Flask raise
    "view function did not return a valid response"; return an
    explicit empty 204 (No Content) response instead.
    """
    # TODO: actually stop the capture (the original had `vs.stop()`
    # commented out) — presumably by signalling gen() to break its loop.
    return '', 204
# Import the OpenCV and dlib libraries
import cv2
import dlib
import asyncio
import threading
# Initialize a face cascade using the frontal face haar cascade provided with
# the OpenCV library
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
# The desired output width and height of the streamed frames
OUTPUT_SIZE_WIDTH = 775
OUTPUT_SIZE_HEIGHT = 600
# Number of worker threads that send captured frames to the face service
maxThreads = 5
# Started myThread workers; populated lazily on the first call to gen()
threadPool = []
# One work queue (list of temp-file paths) per worker thread.
# NOTE(review): 7 queues are created but only maxThreads (5) workers are
# started, so the last two queues are never drained — confirm intent.
threadList = [[], [], [], [], [], [], []]
class myThread(threading.Thread):
    """Worker thread that drains its queue of frame-image paths.

    Each worker polls its ``imageList`` queue; when a path appears it
    opens the image and submits it to ``service.check_face``.  Fixes over
    the original: the file handle is closed via a context manager (it was
    leaked), and the temp file is deleted once processed (it was leaked
    on disk).
    """

    # Unused in the current code; kept so any external reader of the
    # attribute keeps working.
    sleepTime = 1000

    def __init__(self, index, imageList):
        threading.Thread.__init__(self)
        self.threadID = index
        # Shared with gen(), which appends temp-file paths for us to process.
        self.imageList = imageList

    def run(self):
        while True:
            try:
                if len(self.imageList) > 0:
                    filename = self.imageList.pop()
                    # Close the handle deterministically (original leaked it).
                    with open(filename, "rb") as image:
                        service.check_face(image)
                    # The frame was written with delete=False; remove it now
                    # that it has been consumed, or it accumulates forever.
                    os.remove(filename)
            except Exception as e:
                # Best-effort worker: log and keep polling (matches original).
                print("Error thread" + str(self.threadID), e.args)
            time.sleep(0.2)
def gen():
    """Yield MJPEG frames from the first webcam for the Flask stream.

    Each yielded chunk is one ``--frame`` part of a
    multipart/x-mixed-replace response.  Detection uses the haar cascade
    to find the largest face, then hands off to a dlib correlation
    tracker; every 5th tracked frame is queued for a worker thread to
    send to the face service.

    Fixes over the original:
      * ``capture.read()`` failure is now detected (the original crashed
        in ``cv2.resize`` on a None frame).
      * frames are encoded in memory with ``cv2.imencode`` instead of
        writing one never-deleted temp file per frame and leaking its
        read handle.
      * the per-5-frame temp file is unlinked when its worker queue is
        busy (it was leaked).
      * the capture device is released on exit.
    """
    # Open the first webcam device.
    capture = cv2.VideoCapture(0)

    # Lazily start the worker pool exactly once (threadPool is module-level).
    if len(threadPool) == 0:
        for index in range(0, maxThreads):
            worker = myThread(index, threadList[index])
            worker.start()
            threadPool.append(worker)

    # Create the tracker we will use.
    tracker = dlib.correlation_tracker()
    # Whether the dlib tracker is currently locked onto a face.
    trackingFace = 0
    # The color of the rectangle we draw around the face.
    rectangleColor = (0, 165, 255)
    # Frames tracked since the last detection.
    faceTracked = 0
    # Round-robin index into threadList for dispatching frames to workers.
    rollingThreadId = 0

    try:
        while True:
            # Retrieve the latest image from the webcam.
            rc, fullSizeBaseImage = capture.read()
            if not rc:
                # Camera read failed; end the stream instead of crashing.
                break

            # Work on a 320x240 copy; resultImage gets the overlay rectangle.
            baseImage = cv2.resize(fullSizeBaseImage, (320, 240))
            resultImage = baseImage.copy()

            # If we are not tracking a face, try to detect one.
            if not trackingFace:
                faceTracked = 0
                # Haar detection needs a grayscale image.
                gray = cv2.cvtColor(baseImage, cv2.COLOR_BGR2GRAY)
                faces = faceCascade.detectMultiScale(gray, 1.3, 5)
                print("Using the cascade detector to detect face")

                # Keep only the largest face by rectangle area.  Cast to
                # int because dlib's tracker rejects numpy.int32.
                maxArea = 0
                x = y = w = h = 0
                for (_x, _y, _w, _h) in faces:
                    if _w * _h > maxArea:
                        x, y, w, h = int(_x), int(_y), int(_w), int(_h)
                        maxArea = w * h

                if maxArea > 0:
                    # Start tracking a slightly padded region around the face.
                    tracker.start_track(baseImage,
                                        dlib.rectangle(x - 10,
                                                       y - 20,
                                                       x + w + 10,
                                                       y + h + 20))
                    trackingFace = 1

            # If the tracker is active, update it and draw the rectangle.
            if trackingFace:
                print("Tracking", faceTracked)
                trackingQuality = tracker.update(baseImage)
                if trackingQuality >= 8.75:
                    tracked_position = tracker.get_position()
                    t_x = int(tracked_position.left())
                    t_y = int(tracked_position.top())
                    t_w = int(tracked_position.width())
                    t_h = int(tracked_position.height())
                    cv2.rectangle(resultImage, (t_x, t_y),
                                  (t_x + t_w, t_y + t_h),
                                  rectangleColor, 2)
                else:
                    # Poor quality (e.g. face left the frame): fall back to
                    # detection on the next loop.
                    trackingFace = 0

                faceTracked += 1
                # Every 5th tracked frame, hand the full-size frame to a
                # worker thread (round-robin) for the face service.
                if (faceTracked % 5) == 0:
                    tempFile = tempfile.NamedTemporaryFile(suffix=".jpg",
                                                           delete=False)
                    print(tempFile.name)
                    # Close our handle; cv2.imwrite opens the path itself.
                    tempFile.close()
                    cv2.imwrite(tempFile.name, fullSizeBaseImage)
                    if len(threadList[rollingThreadId]) == 0:
                        threadList[rollingThreadId].append(tempFile.name)
                    else:
                        # Worker is busy: drop the frame and reclaim the file
                        # (the original leaked it on disk).
                        os.unlink(tempFile.name)
                    rollingThreadId = (rollingThreadId + 1) % maxThreads

                # Reset after 40 frames to avoid a stuck tracker; this also
                # affects any per-face calculation done for a new face.
                if faceTracked == 40:
                    trackingFace = 0

            # Upscale the annotated image to the output size for the client.
            largeResult = cv2.resize(resultImage,
                                     (OUTPUT_SIZE_WIDTH, OUTPUT_SIZE_HEIGHT))

            # Encode in memory instead of round-tripping through a leaked
            # temp file (original wrote + re-read one file per frame).
            ok, jpeg = cv2.imencode(".jpg", largeResult)
            if not ok:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg.tobytes() + b'\r\n')
    # Handle Ctrl-C in the console: tear down windows and exit cleanly.
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        exit(0)
    finally:
        # Always release the camera, whatever ended the loop.
        capture.release()
if __name__ == '__main__':
    # gen() is a generator: merely calling it does nothing.  Iterate it so
    # frames are actually captured when the script is run directly.
    for _ in gen():
        pass
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment