devindersarai / live-facial-expression-recognition.py
Live facial expression recognition from a webcam (or a video file).
import cv2
import numpy as np
import os
import keras
from keras.models import load_model
# Emotion labels indexed by the model's predicted class; 'empty' pads index 0 since RAVDESS emotion codes start at 1.
dicts = ['empty', 'neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'surprise', 'neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'surprise']
# To capture video from a webcam
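# --- Sketch (the gist preview cuts off here): how the capture-and-predict loop
# might continue. Assumptions: the trained model is saved as 'model.h5', faces
# are found with OpenCV's bundled HAAR cascade, and pixel values are scaled to
# [0, 1]; adjust these to match how the model was actually trained. ---
cap = cv2.VideoCapture(0)  # pass a file path instead of 0 to read from a video file
model = load_model('model.h5')  # path is an assumption
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        # Crop the face and resize it to whatever input size the model expects.
        face = cv2.resize(frame[y:y + h, x:x + w], model.input_shape[1:3][::-1])
        pred = model.predict(np.expand_dims(face / 255.0, axis=0))
        # Mapping argmax straight into dicts assumes class indices line up with that list.
        text = dicts[int(np.argmax(pred))]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow('Facial expression', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()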
devindersarai / cnn-model.py
CNN model for facial expression recognition; an arbitrary dropout value of 0.5 was chosen.
from keras.models import Sequential
from keras.layers import Convolution2D, BatchNormalization, Activation, AveragePooling2D, Dropout

model = Sequential()
model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same', input_shape=(256,256,3)))
model.add(BatchNormalization())
model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
model.add(Dropout(0.5))
model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
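# --- Sketch (the gist preview cuts off here): one plausible way to finish and
# compile the model. The remaining layers, the 8-class softmax head, and the
# optimizer/loss below are assumptions, not taken from the gist. ---
from keras.layers import Flatten, Dense

model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(8, activation='softmax'))  # one unit per RAVDESS emotion (assumption)

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()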
devindersarai / video-preprocessing.py
Preprocessing the videos from the RAVDESS dataset into individual frames of the actors' faces and saving them to the appropriate folder (based on the labeled emotion).
import os
import glob
import cv2

# VIDEO_FOLDER and the label module (which maps a RAVDESS filename to its emotion) are defined elsewhere.
dim = (384, 216)        # working resolution for each frame
dim_final = (224, 224)  # size of the saved face crops
frameRate = 30          # every nth frame is saved
# HAAR face detector; the cascade file bundled with OpenCV is assumed here.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# Detect the face in each frame using the HAAR cascade, crop it, resize it, and finally save it.
for actor in glob.glob(VIDEO_FOLDER):
    for videoFile in glob.glob(actor + '/*'):
        print(videoFile)
        emotion = label.get_emotion(videoFile[-24:])  # the emotion code is encoded in the RAVDESS filename
        cap = cv2.VideoCapture(videoFile)  # capturing the video from the given path
        while cap.isOpened():
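            # --- Sketch (the gist preview cuts off here): one plausible loop body for the
            # preprocessing described above. OUTPUT_FOLDER and the saved-file naming scheme
            # are assumptions, not taken from the gist. ---
            ret, frame = cap.read()
            if not ret:
                break
            frameId = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # index of the frame just read
            if frameId % frameRate != 0:  # keep every nth frame only
                continue
            frame = cv2.resize(frame, dim)  # downscale before running detection
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
            for (x, y, w, h) in faces:
                face = cv2.resize(frame[y:y + h, x:x + w], dim_final)
                outName = '%s_%d.jpg' % (os.path.splitext(os.path.basename(videoFile))[0], frameId)
                cv2.imwrite(os.path.join(OUTPUT_FOLDER, emotion, outName), face)
        cap.release()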