Face Blurring Blog
# load the image to be detected
image = cv2.imread('test/family.jpg')
# find all face locations using face_locations() function
# model can be "cnn" or "hog"
# increasing number_of_times_to_upsample (default 1) detects smaller faces, at the cost of speed
all_face_locations = face_recognition.face_locations(image, model="hog")
# print the number of faces detected
print("There are {} face(s) in this image".format(len(all_face_locations)))
# create copy of original image
bbox_image = image.copy()
# loop over each face detected in the image
# and draw a box over the face region
for face_location in all_face_locations:
    # split the location tuple into its four positions
    top_pos, right_pos, bottom_pos, left_pos = face_location
    #---------------------------------------------------------------------------
    # Face Blurring
    #---------------------------------------------------------------------------
    # slice the image array using the face location positions
    face_image = bbox_image[top_pos:bottom_pos, left_pos:right_pos]
    # blur the current face region
    face_image = cv2.GaussianBlur(face_image, (99,99), 30)
    # put the blurred face region back into the image
    bbox_image[top_pos:bottom_pos, left_pos:right_pos] = face_image
    #---------------------------------------------------------------------------
    # draw bounding box around the face
    bbox_image = cv2.rectangle(bbox_image, (left_pos,top_pos), (right_pos,bottom_pos), (0,0,255), 2)

# show the final image and wait for a key press
cv2.imshow("Blurred Faces", bbox_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# only for google colab
# cv2_imshow(bbox_image)
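If you also want to keep the anonymized result on disk rather than only display it, the blurred image can be written out with cv2.imwrite; a minimal sketch continuing from the snippet above (the output file name is an assumption, not part of the original code):

# save the blurred image to disk (the output file name is an assumption)
saved = cv2.imwrite('test/family-blurred.jpg', bbox_image)
print("Saved blurred image" if saved else "Failed to save blurred image")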
├── test
│   ├── family.jpg
│   └── face-demographics-walking.mp4
├── face-blurring-on-image.py
└── realtime-face-blurring.py
# face-blurring-on-image.py
# import required packages
import face_recognition
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# realtime-face-blurring.py
# import required packages
import face_recognition
import cv2
# only for google colab
# from google.colab.patches import cv2_imshow
# for using your inbuilt webcam
# get webcam #0 (the default one; use 1, 2, and so on for additional cameras)
# video_stream = cv2.VideoCapture(0)
# for using video file
video_file_path = 'test/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)
# initialize the number of frames to skip before reading each processed frame
skip = 0
while True:
    # skip the requested number of frames before reading the next one
    for i in range(skip):
        video_stream.grab()
    # get current frame
    stat, frame = video_stream.read()
    # stop the loop when there is no frame left to read
    if stat == False:
        print('Video has ended')
        break
    # reduce size of the frame to 50% for faster detection
    scaled_frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
    # find all face locations using face_locations() function
    # model can be "cnn" or "hog"
    # increasing number_of_times_to_upsample (default 1) detects smaller faces, at the cost of speed
    all_face_locations = face_recognition.face_locations(scaled_frame, model="cnn")
    for index, face_location in enumerate(all_face_locations):
        # split the location tuple into its four positions
        top_pos, right_pos, bottom_pos, left_pos = face_location
        # scale the coordinates back up by 2, since detection ran on the half-size frame
        top_pos = top_pos * 2
        right_pos = right_pos * 2
        bottom_pos = bottom_pos * 2
        left_pos = left_pos * 2
        #---------------------------------------------------------------------------
        # Face Blurring
        #---------------------------------------------------------------------------
        # slice the frame array using the face location positions
        face_image = frame[top_pos:bottom_pos, left_pos:right_pos]
        # blur the current face region
        face_image = cv2.GaussianBlur(face_image, (99,99), 30)
        # put the blurred face region back into the frame
        frame[top_pos:bottom_pos, left_pos:right_pos] = face_image
        #---------------------------------------------------------------------------
        # draw a rectangle around each face
        frame = cv2.rectangle(frame, (left_pos,top_pos), (right_pos,bottom_pos), (0,0,255), 2)
    # display the frame
    cv2.imshow("Blurred Face Video", frame)
    # only for google colab
    # cv2_imshow(frame)
    # press 'q' on the keyboard to break the while loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the video stream resource
video_stream.release()
cv2.destroyAllWindows()
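If the anonymized video should also be saved rather than only displayed, each processed frame can be handed to a cv2.VideoWriter before (or instead of) showing it; a minimal sketch under the assumption that the output path, the 'mp4v' codec, and the fallback frame rate below are placeholders, not part of the original script:

# import required packages
import cv2

# open the input video and read its properties
video_stream = cv2.VideoCapture('test/face-demographics-walking.mp4')
fps = video_stream.get(cv2.CAP_PROP_FPS) or 25   # fall back to 25 fps if the property is unavailable
width = int(video_stream.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video_stream.get(cv2.CAP_PROP_FRAME_HEIGHT))

# create the writer (output name and 'mp4v' codec are assumptions)
writer = cv2.VideoWriter('test/blurred-output.mp4',
                         cv2.VideoWriter_fourcc(*'mp4v'),
                         fps, (width, height))

while True:
    stat, frame = video_stream.read()
    if stat == False:
        break
    # ... blur the faces in `frame` exactly as in the loop above ...
    writer.write(frame)

# release both the reader and the writer
video_stream.release()
writer.release()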