•
├── test
│ ├── modi-obama-1.jpg
│ ├── modi-obama-2.jpg
│ └── face-demographics-walking.mp4
├── models
│ ├── age_deploy.prototxt
│ └── age_net.caffemodel
├── age-prediction-from-image.py
└── realtime-age-prediction.py
Real-time Age Prediction Blog
The main video-processing loop (it relies on the imports and the model setup shown in the next two snippets):
# capture the video from the default camera
# video_stream = cv2.VideoCapture(0)

# read the video from a video file
video_file_path = 'test/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# number of frames to skip between processed frames (0 = process every frame)
skip = 0

while True:
    # skip frames without decoding them
    for i in range(skip):
        video_stream.grab()

    # read the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # shrink the frame to 50% to speed up face detection
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations; model can be "cnn" (accurate, GPU-friendly) or "hog" (faster on CPU)
    # raise number_of_times_to_upsample to detect smaller faces, at the cost of speed
    all_face_locations = face_recognition.face_locations(scaled_frame, model="cnn")

    for index, face_location in enumerate(all_face_locations):
        # unpack the (top, right, bottom, left) tuple
        top_pos, right_pos, bottom_pos, left_pos = face_location

        # scale the coordinates back up by 2x to match the full-size frame
        top_pos = top_pos * 2
        right_pos = right_pos * 2
        bottom_pos = bottom_pos * 2
        left_pos = left_pos * 2

        #---------------------------------------------------------------------
        # Age Detection
        #---------------------------------------------------------------------
        # slice the face region out of the full-size frame
        face_image = frame[top_pos:bottom_pos, left_pos:right_pos]

        # per-channel (B, G, R) mean values the age model was trained with
        AGE_GENDER_MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)

        # turn the face image into a 227x227 mean-subtracted blob
        face_image_blob = cv2.dnn.blobFromImage(face_image, 1, (227, 227), AGE_GENDER_MODEL_MEAN_VALUES, swapRB=False)

        # feed the blob to the age detection model
        age_cov_net.setInput(face_image_blob)

        # run a forward pass to get the prediction scores
        age_prediction = age_cov_net.forward()

        # the index of the highest score maps to a label in age_label_list
        age_label = age_label_list[age_prediction[0].argmax()]

        # draw a rectangle around the face
        cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

        # display the predicted age range just below the face
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(frame, (left_pos - 1, bottom_pos + 18), (right_pos + 1, bottom_pos), (0, 0, 255), -1)
        cv2.putText(frame, age_label, (left_pos + 2, bottom_pos + 11), font, 0.5, (255, 255, 255), 1)
        #---------------------------------------------------------------------

    # display the annotated frame
    cv2.imshow("Video", frame)
    # only for Google Colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to stop the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream and close the display window
video_stream.release()
cv2.destroyAllWindows()
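For this Caffe age model, the forward pass returns a (1, 8) array of softmax scores, one per age bucket, so the top score can double as a rough confidence shown next to the label. A minimal sketch of that idea; the format_age_label helper is my own illustration, not part of the gist:

def format_age_label(age_prediction, age_label_list):
    # age_prediction has shape (1, 8): one softmax row per input image
    scores = age_prediction[0]
    best_index = scores.argmax()
    # e.g. "25-32 (0.87)"
    return "%s (%.2f)" % (age_label_list[best_index], scores[best_index])

Inside the loop, age_label = format_age_label(age_prediction, age_label_list) would then replace the plain list lookup.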
The imports the script needs:
# import the required libraries
import cv2
import numpy as np
import face_recognition

# only for Google Colab
# from google.colab.patches import cv2_imshow
Loading the age labels and the pre-trained Caffe model:
# the eight age buckets the model predicts
age_label_list = ['0-2', '4-6', '8-12', '15-20', '25-32', '38-43', '48-53', '60-100']

# load the pre-trained Caffe age model
age_prototxt = "models/age_deploy.prototxt"
age_caffemodel = "models/age_net.caffemodel"
age_cov_net = cv2.dnn.readNet(age_caffemodel, age_prototxt)
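cv2.dnn.readNet infers the framework from the file extensions. If the model files might be missing or misnamed, a quick existence check plus the Caffe-specific loader fails with a clearer message; this variant is a sketch, not part of the gist:

import os
import cv2

age_prototxt = "models/age_deploy.prototxt"
age_caffemodel = "models/age_net.caffemodel"

# fail fast with a readable error instead of an opaque cv2 exception
for path in (age_prototxt, age_caffemodel):
    if not os.path.isfile(path):
        raise FileNotFoundError("model file not found: " + path)

# readNetFromCaffe takes the prototxt first, then the weights
age_cov_net = cv2.dnn.readNetFromCaffe(age_prototxt, age_caffemodel)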
Putting it all together, the complete realtime-age-prediction.py script:
# import the required libraries
import cv2
import numpy as np
import face_recognition

# only for Google Colab
# from google.colab.patches import cv2_imshow

# the eight age buckets the model predicts
age_label_list = ['0-2', '4-6', '8-12', '15-20', '25-32', '38-43', '48-53', '60-100']

# load the pre-trained Caffe age model
age_prototxt = "models/age_deploy.prototxt"
age_caffemodel = "models/age_net.caffemodel"
age_cov_net = cv2.dnn.readNet(age_caffemodel, age_prototxt)

# capture the video from the default camera
# video_stream = cv2.VideoCapture(0)

# read the video from a video file
video_file_path = 'test/face-demographics-walking.mp4'
video_stream = cv2.VideoCapture(video_file_path)

# number of frames to skip between processed frames (0 = process every frame)
skip = 0

while True:
    # skip frames without decoding them
    for i in range(skip):
        video_stream.grab()

    # read the current frame
    stat, frame = video_stream.read()

    # stop when no frame is left to read
    if not stat:
        print('Video has ended')
        break

    # shrink the frame to 50% to speed up face detection
    scaled_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # find all face locations; model can be "cnn" (accurate, GPU-friendly) or "hog" (faster on CPU)
    # raise number_of_times_to_upsample to detect smaller faces, at the cost of speed
    all_face_locations = face_recognition.face_locations(scaled_frame, model="cnn")

    for index, face_location in enumerate(all_face_locations):
        # unpack the (top, right, bottom, left) tuple
        top_pos, right_pos, bottom_pos, left_pos = face_location

        # scale the coordinates back up by 2x to match the full-size frame
        top_pos = top_pos * 2
        right_pos = right_pos * 2
        bottom_pos = bottom_pos * 2
        left_pos = left_pos * 2

        #---------------------------------------------------------------------
        # Age Detection
        #---------------------------------------------------------------------
        # slice the face region out of the full-size frame
        face_image = frame[top_pos:bottom_pos, left_pos:right_pos]

        # per-channel (B, G, R) mean values the age model was trained with
        AGE_GENDER_MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)

        # turn the face image into a 227x227 mean-subtracted blob
        face_image_blob = cv2.dnn.blobFromImage(face_image, 1, (227, 227), AGE_GENDER_MODEL_MEAN_VALUES, swapRB=False)

        # feed the blob to the age detection model
        age_cov_net.setInput(face_image_blob)

        # run a forward pass to get the prediction scores
        age_prediction = age_cov_net.forward()

        # the index of the highest score maps to a label in age_label_list
        age_label = age_label_list[age_prediction[0].argmax()]

        # draw a rectangle around the face
        cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)

        # display the predicted age range just below the face
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(frame, (left_pos - 1, bottom_pos + 18), (right_pos + 1, bottom_pos), (0, 0, 255), -1)
        cv2.putText(frame, age_label, (left_pos + 2, bottom_pos + 11), font, 0.5, (255, 255, 255), 1)
        #---------------------------------------------------------------------

    # display the annotated frame
    cv2.imshow("Video", frame)
    # only for Google Colab
    # cv2_imshow(frame)

    # press 'q' on the keyboard to stop the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video stream and close the display window
video_stream.release()
cv2.destroyAllWindows()
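The tree above also lists age-prediction-from-image.py along with two test images; that file's listing isn't shown on this page, but a condensed sketch of the same pipeline applied to a single still image would look roughly like this (same model files, labels, and mean values as above):

import cv2
import face_recognition

age_label_list = ['0-2', '4-6', '8-12', '15-20', '25-32', '38-43', '48-53', '60-100']
age_cov_net = cv2.dnn.readNet("models/age_net.caffemodel", "models/age_deploy.prototxt")
AGE_GENDER_MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)

# load one of the bundled test images
image = cv2.imread('test/modi-obama-1.jpg')

# detect faces on the full-resolution image
all_face_locations = face_recognition.face_locations(image, model="cnn")

for top_pos, right_pos, bottom_pos, left_pos in all_face_locations:
    # slice out the face and predict its age bucket
    face_image = image[top_pos:bottom_pos, left_pos:right_pos]
    blob = cv2.dnn.blobFromImage(face_image, 1, (227, 227),
                                 AGE_GENDER_MODEL_MEAN_VALUES, swapRB=False)
    age_cov_net.setInput(blob)
    age_label = age_label_list[age_cov_net.forward()[0].argmax()]

    # annotate the image
    cv2.rectangle(image, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)
    cv2.putText(image, age_label, (left_pos, bottom_pos + 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Because a still image is processed only once, there is no need to downscale it or skip frames.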