@KevinPatel04
Last active April 20, 2020 12:08
Face Detection Image Blog
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# load the image to be detected
image = cv2.imread('images/modi-obama-1.jpg')
# initialize dlib's CNN face detector
# with the mmod_human_face_detector.dat weights
weights = 'models/mmod_human_face_detector.dat'
cnn_face_detector = dlib.cnn_face_detection_model_v1(weights)
# find all face locations using cnn_face_detector
# First argument is the image array
# The 1 in the second argument indicates that we should upsample the image
# 1 time. This makes everything bigger and allows us to detect more of the smaller faces in the image.
all_face_locations = cnn_face_detector(image, 1)
# initialize the (B,G,R) color tuple for the bounding box
color = (0,0,255)
# initialize stroke width
stroke = 2
# create copy of original image so that original image is not modified
bbox_image = image.copy()
# loop over each face detected in the image
for face_location in all_face_locations:
    top_pos = face_location.rect.top()
    right_pos = face_location.rect.right()
    bottom_pos = face_location.rect.bottom()
    left_pos = face_location.rect.left()
    # draw bbox around face
    bbox_image = cv2.rectangle(bbox_image, (left_pos,top_pos), (right_pos,bottom_pos), color, stroke)
# show image
cv2.imshow("Faces Detected",bbox_image)
# only for google colab
# cv2_imshow(bbox_image)
# add the following code to crop the faces from the image
# loop over each face detected in the image
for index, face_location in enumerate(all_face_locations):
    top_pos = face_location.rect.top()
    right_pos = face_location.rect.right()
    bottom_pos = face_location.rect.bottom()
    left_pos = face_location.rect.left()
    face = image[top_pos:bottom_pos,left_pos:right_pos]
    #---------------------------------------------------------------------------------
    # show face image
    #---------------------------------------------------------------------------------
    cv2.imshow("Face No: "+str(index),face)
    # only for google colab
    # cv2_imshow(face)
    #---------------------------------------------------------------------------------
    # write / save the face image on your device
    #---------------------------------------------------------------------------------
    cv2.imwrite("Face{}.jpg".format(index),face)
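When this script is run locally rather than in Google Colab, the windows opened by cv2.imshow() disappear as soon as the script ends. A minimal sketch (my addition, not part of the original gist) that keeps them open until a key is pressed:

# keep all OpenCV windows open until any key is pressed, then close them
cv2.waitKey(0)
cv2.destroyAllWindows()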
Project directory structure:
├── images
│   ├── modi-obama-1.jpg
│   └── modi-obama-2.jpg
├── models
│   └── mmod_human_face_detector.dat
├── cnn-face-detector.py
├── hog-face-detector.py
└── face-recognition-detector.py
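The CNN detector expects the mmod_human_face_detector.dat weights inside the models folder. Below is a minimal sketch for fetching them, assuming dlib still publishes the compressed weights on dlib.net/files; the URL and file layout here are my assumption, not part of the gist.

# download and decompress dlib's CNN face detector weights (sketch; URL may change)
import bz2
import os
import urllib.request

weights_url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
weights_path = "models/mmod_human_face_detector.dat"

if not os.path.exists(weights_path):
    os.makedirs("models", exist_ok=True)
    compressed_file, _ = urllib.request.urlretrieve(weights_url)
    with bz2.open(compressed_file, "rb") as src, open(weights_path, "wb") as dst:
        dst.write(src.read())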
# initialize the (B,G,R) color tuple for the bounding box
color = (0,0,255)
# initialize stroke width
stroke = 2
# create copy of original image so that original image is not modified
bbox_image = image.copy()
# loop over each face detected in the image
for face_location in all_face_locations:
    top_pos = face_location.top()
    right_pos = face_location.right()
    bottom_pos = face_location.bottom()
    left_pos = face_location.left()
    # draw bbox around face
    bbox_image = cv2.rectangle(bbox_image, (left_pos,top_pos), (right_pos,bottom_pos), color, stroke)
# show image
cv2.imshow("Faces Detected",bbox_image)
# only for google colab
# cv2_imshow(bbox_image)
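To tell the detected faces apart in the output window, each box can also be labelled with its index. This is a small sketch of mine using OpenCV's cv2.putText(), reusing the color and stroke variables from the snippet above:

# label each detected face just above its bounding box (sketch)
for index, face_location in enumerate(all_face_locations):
    left_pos = face_location.left()
    top_pos = face_location.top()
    cv2.putText(bbox_image, "Face {}".format(index), (left_pos, top_pos - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, stroke)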
# import required packages
import face_recognition
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# load the image to be detected
image = cv2.imread('images/modi-obama-1.jpg')
# find all face locations using the face_locations() function
# model can be "hog" or "cnn"
# increase number_of_times_to_upsample to detect more of the smaller faces in the image
all_face_locations = face_recognition.face_locations(image, model="hog")
# initialize the (B,G,R) color tuple for the bounding box
color = (0,0,255)
# initialize stroke width
stroke = 2
# create copy of original image
bbox_image = image.copy()
# loop over each face detected in the image
# draw box over the image
for face_location in all_face_locations:
    top_pos, right_pos, bottom_pos, left_pos = face_location
    # draw bbox around face
    bbox_image = cv2.rectangle(bbox_image, (left_pos,top_pos), (right_pos,bottom_pos), color, stroke)
# show image
cv2.imshow("Faces Detected",bbox_image)
# only for google colab
# cv2_imshow(bbox_image)
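The comments above mention the model and upsampling options of face_recognition.face_locations(). As a sketch, here is the same call with those keyword arguments spelled out; "cnn" is slower but usually more accurate, and a higher upsample count helps with smaller faces:

# sketch: explicit keyword arguments for face_locations()
all_face_locations = face_recognition.face_locations(image,
                                                     number_of_times_to_upsample=1,
                                                     model="cnn")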
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# load the image to be detected
image = cv2.imread('images/modi-obama-1.jpg')
# initialize dlib's HOG face detector
hog_face_detector = dlib.get_frontal_face_detector()
# find all face locations using hog_face_detector
# First argument is the image array
# The 1 in the second argument indicates that we should upsample the image
# 1 time. This makes everything bigger and allows us to detect more of the smaller faces in the image.
all_face_locations = hog_face_detector(image, 1)
# initialize the (B,G,R) color tuple for the bounding box
color = (0,0,255)
# initialize stroke width
stroke = 2
# create copy of original image so that original image is not modified
bbox_image = image.copy()
# loop over each face detected in the image
for face_location in all_face_locations:
    top_pos = face_location.top()
    right_pos = face_location.right()
    bottom_pos = face_location.bottom()
    left_pos = face_location.left()
    # draw bbox around face
    bbox_image = cv2.rectangle(bbox_image, (left_pos,top_pos), (right_pos,bottom_pos), color, stroke)
# show image
cv2.imshow("Faces Detected",bbox_image)
# only for google colab
# cv2_imshow(bbox_image)
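dlib's HOG detector can also report how confident it is about each detection through its run() method, which returns the rectangles together with a score per face. A short sketch based on dlib's documented API (the threshold adjustment value here is my choice):

# sketch: get detection scores alongside the face rectangles
face_rects, scores, detector_ids = hog_face_detector.run(image, 1, 0)
for rect, score in zip(face_rects, scores):
    print("face at {} with score {:.2f}".format(rect, score))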
# import required packages
import dlib
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# initialize dlib's face detectors
# HOG face detector
hog_face_detector = dlib.get_frontal_face_detector()
# CNN face detector with mmod_human_face_detector.dat weights
weights = 'models/mmod_human_face_detector.dat'
cnn_face_detector = dlib.cnn_face_detection_model_v1(weights)
# load the image to be detected
image = cv2.imread('images/modi-obama-1.jpg')
# find all face locations using hog_face_detector or cnn_face_detector
# First argument is the image array
# The 1 in the second argument indicates that we should upsample the image
# 1 time. This makes everything bigger and allows us to detect more of the smaller faces in the image.
all_face_locations = hog_face_detector(image, 1)
# print the number of faces found in the image
print("There are {} face(s) in this image".format(len(all_face_locations)))
# dlib's face detectors return rectangle objects holding the top, right, bottom and left coordinates of each face
# print the contents of all_face_locations
print(all_face_locations)
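To use the CNN detector in this combined script instead, the only change besides calling cnn_face_detector is that each detection is an mmod rectangle, so the coordinates are read from its .rect attribute, exactly as in the CNN script above. A short sketch:

# sketch: the same detection step with the CNN detector; coordinates live on .rect
all_face_locations = cnn_face_detector(image, 1)
for face_location in all_face_locations:
    print(face_location.rect.top(), face_location.rect.right(),
          face_location.rect.bottom(), face_location.rect.left())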