Real-time Face Detection Blog
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow

# for using your inbuilt webcam
# get webcam #0 (the default one; use 1, 2, and so on for other cameras)
# video_stream = cv2.VideoCapture(0)

# for using a video file
video_file_path = 'video/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# initialize dlib's CNN face detector
# with the mmod_human_face_detector.dat weights
weights = 'models/mmod_human_face_detector.dat'
cnn_face_detector = dlib.cnn_face_detection_model_v1(weights)

# initialize the number of frames to skip between detections
skip = 0

while True:
    # skip the requested number of frames
    for i in range(skip):
        video_stream.grab()

    # get the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # reduce the size of the frame to 50%
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations using cnn_face_detector
    # the first argument is the image array
    # the second argument (1) upsamples the image once,
    # which makes everything bigger and allows smaller faces to be detected
    all_face_locations = cnn_face_detector(scaled_frame, 1)

    for index, face_location in enumerate(all_face_locations):
        # get the box coordinates and scale them up by 2
        # (the frame was downscaled by 2 before detection)
        top_pos = face_location.rect.top() * 2
        right_pos = face_location.rect.right() * 2
        bottom_pos = face_location.rect.bottom() * 2
        left_pos = face_location.rect.left() * 2

        # draw a rectangle around each face
        frame = cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

    # display the frame
    cv2.imshow("Video", frame)
    # only for google colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to break the while loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream resource
video_stream.release()
cv2.destroyAllWindows()
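A side note on the CNN detector used above: each detection it returns is an mmod_rectangle, which carries both a .rect bounding box and a .confidence score, so weak detections can be filtered out before drawing. The sketch below illustrates this on a single image; the image path and the 0.5 threshold are assumptions for illustration, not part of the original script.

# minimal sketch: filter CNN detections by confidence
import dlib
import cv2

cnn_face_detector = dlib.cnn_face_detection_model_v1('models/mmod_human_face_detector.dat')
image = cv2.imread('images/sample.jpg')  # hypothetical test image

# each detection exposes .rect and .confidence; 0.5 is an arbitrary illustrative threshold
for detection in cnn_face_detector(image, 1):
    if detection.confidence > 0.5:
        rect = detection.rect
        print(rect.left(), rect.top(), rect.right(), rect.bottom(), detection.confidence)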
# initialize the number of frames to skip between detections
skip = 0

while True:
    # skip the requested number of frames
    for i in range(skip):
        video_stream.grab()

    # get the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # reduce the size of the frame to 50%
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations using hog_face_detector
    # the first argument is the image array
    # the second argument (1) upsamples the image once,
    # which makes everything bigger and allows smaller faces to be detected
    all_face_locations = hog_face_detector(scaled_frame, 1)

    for index, face_location in enumerate(all_face_locations):
        # get the box coordinates and scale them up by 2
        # (the frame was downscaled by 2 before detection)
        top_pos = face_location.top() * 2
        right_pos = face_location.right() * 2
        bottom_pos = face_location.bottom() * 2
        left_pos = face_location.left() * 2

        # draw a rectangle around each face
        frame = cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

    # display the frame
    cv2.imshow("Video", frame)
    # only for google colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to break the while loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream resource
video_stream.release()
cv2.destroyAllWindows()
├── video
│   └── face-demographics-walking.mp4
├── models
│   └── mmod_human_face_detector.dat
├── cnn-realtime-face-detection.py
├── hog-realtime-face-detection.py
└── face-recognition-realtime-face-detection.py
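The scripts above assume this folder layout. If the video or the mmod_human_face_detector.dat weights file is missing, cv2.VideoCapture and dlib fail with fairly opaque errors, so a quick existence check before running can save some debugging; the sketch below simply mirrors the paths from the tree.

# minimal sketch: verify the expected input files exist before running the detectors
import os

required_files = [
    'video/face-demographics-walking.mp4',
    'models/mmod_human_face_detector.dat',
]
for path in required_files:
    if not os.path.isfile(path):
        print('Missing file:', path)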
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow

# for using your inbuilt webcam
# get webcam #0 (the default one; use 1, 2, and so on for other cameras)
# video_stream = cv2.VideoCapture(0)

# for using a video file
video_file_path = 'video/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# initialize the HOG face detection model
hog_face_detector = dlib.get_frontal_face_detector()

# initialize the number of frames to skip between detections
skip = 0

while True:
    # skip the requested number of frames
    for i in range(skip):
        video_stream.grab()

    # get the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # reduce the size of the frame to 50%
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations using hog_face_detector
    # the first argument is the image array
    # the second argument (1) upsamples the image once,
    # which makes everything bigger and allows smaller faces to be detected
    all_face_locations = hog_face_detector(scaled_frame, 1)

    for index, face_location in enumerate(all_face_locations):
        # get the box coordinates and scale them up by 2
        # (the frame was downscaled by 2 before detection)
        top_pos = face_location.top() * 2
        right_pos = face_location.right() * 2
        bottom_pos = face_location.bottom() * 2
        left_pos = face_location.left() * 2

        # draw a rectangle around each face
        frame = cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

    # display the frame
    cv2.imshow("Video", frame)
    # only for google colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to break the while loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream resource
video_stream.release()
cv2.destroyAllWindows()
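The plain call hog_face_detector(scaled_frame, 1) only returns rectangles. If you also want to see how confident the HOG detector is about each box, dlib's frontal face detector additionally provides a run() method that returns the boxes together with their scores; the sketch below shows that variant on a single image, with a hypothetical image path and an illustrative threshold adjustment.

# minimal sketch: get HOG detection scores via detector.run()
import dlib
import cv2

hog_face_detector = dlib.get_frontal_face_detector()
image = cv2.imread('images/sample.jpg')  # hypothetical test image

# run(image, upsample_num_times, adjust_threshold); a negative adjustment keeps weaker detections
rects, scores, detector_idxs = hog_face_detector.run(image, 1, -0.5)
for rect, score in zip(rects, scores):
    print(rect.left(), rect.top(), rect.right(), rect.bottom(), score)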
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow

# for using your inbuilt webcam
# get webcam #0 (the default one; use 1, 2, and so on for other cameras)
# video_stream = cv2.VideoCapture(0)

# for using a video file
video_file_path = 'video/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# initialize the HOG face detection model
hog_face_detector = dlib.get_frontal_face_detector()
# import required packages
import face_recognition
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow

# for using your inbuilt webcam
# get webcam #0 (the default one; use 1, 2, and so on for other cameras)
# video_stream = cv2.VideoCapture(0)

# for using a video file
video_file_path = 'video/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# initialize the number of frames to skip between detections
skip = 0

while True:
    # skip the requested number of frames
    for i in range(skip):
        video_stream.grab()

    # get the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # reduce the size of the frame to 50%
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations using the face_locations() function
    # model can be "cnn" or "hog"
    # number_of_times_to_upsample defaults to 1; higher values detect more (smaller) faces
    all_face_locations = face_recognition.face_locations(scaled_frame, model="cnn")

    for index, face_location in enumerate(all_face_locations):
        # split the tuple into its four positions
        top_pos, right_pos, bottom_pos, left_pos = face_location

        # scale the coordinates up by 2 (the frame was downscaled by 2 before detection)
        top_pos = top_pos * 2
        right_pos = right_pos * 2
        bottom_pos = bottom_pos * 2
        left_pos = left_pos * 2

        # draw a rectangle around each face
        frame = cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

    # display the frame
    cv2.imshow("Video", frame)
    # only for google colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to break the while loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream resource
video_stream.release()
cv2.destroyAllWindows()
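One detail worth flagging: OpenCV reads frames in BGR order, while face_recognition's own examples pass RGB images to face_locations(). Detection usually still works on raw BGR frames, but converting first is the safer habit. The sketch below shows the conversion on a single image; the image path is a placeholder, and the "hog" model is chosen here only because it runs without a GPU.

# minimal sketch: convert a BGR frame to RGB before calling face_locations()
import cv2
import face_recognition

frame = cv2.imread('images/sample.jpg')  # hypothetical test image
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

# number_of_times_to_upsample defaults to 1; larger values find smaller faces but are slower
face_boxes = face_recognition.face_locations(rgb_frame, number_of_times_to_upsample=1, model="hog")
print(face_boxes)  # list of (top, right, bottom, left) tuples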