Face Expression Recognition Blog
├── test
│   ├── modi-obama-1.jpg
│   ├── modi-obama-2.jpg
│   └── face-demographics-walking.mp4
├── models
│   ├── facial_expression_model_weights.h5
│   └── facial_expression_model_structure.json
├── facial-expression-recognition-from-image.py
└── realtime-facial-expression-recognition.py
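The first script, facial-expression-recognition-from-image.py, loads the pre-trained expression model, locates every face in a still image with face_recognition, and annotates each one with its predicted expression: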
# importing required libraries
import cv2
import numpy as np
from keras.preprocessing import image
from keras.models import model_from_json
import face_recognition
# only for google colab
# from google.colab.patches import cv2_imshow
# face expression model initialization
face_exp_model = model_from_json(open("models/facial_expression_model_structure.json","r").read())
# load weights into model
face_exp_model.load_weights("models/facial_expression_model_weights.h5")
# list of emotion labels
emotions_label = ('Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral')
# load the image in which faces are to be detected
img = cv2.imread('test/modi-obama-1.jpg')
# find all face locations using face_locations() function
# model can be "cnn" or "hog"
# increase number_of_times_to_upsample (default 1) to detect smaller faces
all_face_locations = face_recognition.face_locations(img, model="hog")
# loop over each face detected in the image
for index, face_location in enumerate(all_face_locations):
    # split the location tuple into individual position coordinates
    top_pos, right_pos, bottom_pos, left_pos = face_location
    #-----------------------------------------------------------------------------
    # Face Expression Detection
    #-----------------------------------------------------------------------------
    # slice the image array using the face location coordinates
    face_image = img[top_pos:bottom_pos, left_pos:right_pos]
    # preprocess the input: convert the face crop to the same format as the training data
    # convert face image to grayscale
    face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
    # resize to 48x48 px
    face_image = cv2.resize(face_image, (48, 48))
    # convert the grayscale image into a 3D numpy array of shape (48, 48, 1)
    img_pixels = image.img_to_array(face_image)
    # add a batch dimension so the shape becomes (1, 48, 48, 1)
    img_pixels = np.expand_dims(img_pixels, axis=0)
    # pixels are in the range [0, 255]; normalize them to [0, 1]
    img_pixels /= 255
    # run the model to get prediction values for all 7 expressions
    exp_predictions = face_exp_model.predict(img_pixels)
    # find the index of the highest prediction value (0 to 6)
    max_index = np.argmax(exp_predictions[0])
    # get the corresponding label from the emotions_label list
    emotion_label = emotions_label[max_index]
    # draw a rectangle around each face
    cv2.rectangle(img, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)
    # display the name of the emotion as text below the face
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.rectangle(img, (left_pos-1, bottom_pos+18), (right_pos+1, bottom_pos), (0, 0, 255), -1)
    cv2.putText(img, emotion_label, (left_pos+2, bottom_pos+11), font, 0.5, (255, 255, 255), 1)
    #-----------------------------------------------------------------------------

# show the annotated image and wait for a key press before closing the window
cv2.imshow("Face Expression Recognition", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# only for google colab
# cv2_imshow(img)
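The tree above also lists realtime-facial-expression-recognition.py, which applies the same detect-preprocess-predict pipeline frame by frame to a video stream. Below is a minimal sketch of that script, assuming the same model files and the test video from the tree; the window title, the per-frame loop structure, and the 'q' quit key are illustrative assumptions rather than the author's exact code.

# realtime-facial-expression-recognition.py (sketch)
# importing required libraries
import cv2
import numpy as np
from keras.preprocessing import image
from keras.models import model_from_json
import face_recognition

# face expression model initialization
face_exp_model = model_from_json(open("models/facial_expression_model_structure.json", "r").read())
# load weights into model
face_exp_model.load_weights("models/facial_expression_model_weights.h5")
# list of emotion labels
emotions_label = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')

# open the test video (pass 0 instead of the path to use a webcam stream)
video_capture = cv2.VideoCapture('test/face-demographics-walking.mp4')

while True:
    # read a single frame from the video
    ret, frame = video_capture.read()
    if not ret:
        break
    # find all face locations in the current frame
    all_face_locations = face_recognition.face_locations(frame, model="hog")
    for face_location in all_face_locations:
        top_pos, right_pos, bottom_pos, left_pos = face_location
        # crop, preprocess, and classify the face exactly as in the image script
        face_image = frame[top_pos:bottom_pos, left_pos:right_pos]
        face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        face_image = cv2.resize(face_image, (48, 48))
        img_pixels = image.img_to_array(face_image)
        img_pixels = np.expand_dims(img_pixels, axis=0)
        img_pixels /= 255
        exp_predictions = face_exp_model.predict(img_pixels)
        emotion_label = emotions_label[np.argmax(exp_predictions[0])]
        # draw the face box and the predicted expression on the frame
        cv2.rectangle(frame, (left_pos, top_pos), (right_pos, bottom_pos), (0, 0, 255), 2)
        cv2.putText(frame, emotion_label, (left_pos + 2, bottom_pos + 14),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    # show the annotated frame; press 'q' to quit
    cv2.imshow("Realtime Face Expression Recognition", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the video source and close the display window
video_capture.release()
cv2.destroyAllWindows()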