Chi-Feng Wang (reinaw1012)
import doctest
import sys
import argparse
"""
---USAGE---
python3 sol06.py <name_of_function>
e.g python3 sol06.py nonlocalist
import doctest
import sys
import argparse
"""
---USAGE---
python3 disc06.py <name_of_function>
e.g python3 disc06.py nonlocalist
# Credits to Laryn Qi
import doctest
import sys
import argparse
"""
---USAGE---
python3 disc04.py <name_of_function>
e.g python3 disc04.py multiply
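# The three scripts above share the same pattern: pass the name of one function on the
# command line and its doctests are run. A minimal sketch of such a driver follows; the
# toy multiply() body is an assumption, since the real functions are cut off in the previews.
import argparse
import doctest
import sys

def multiply(a, b):
    """
    >>> multiply(3, 4)
    12
    """
    return a * b

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run the doctests of a single function.')
    parser.add_argument('function', help='name of the function to test, e.g. multiply')
    args = parser.parse_args()
    func = getattr(sys.modules[__name__], args.function)
    doctest.run_docstring_examples(func, globals(), verbose=True)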
# callbacks
from tensorflow.keras.callbacks import (CSVLogger, EarlyStopping, ModelCheckpoint,
                                        ReduceLROnPlateau, TensorBoard)

log_file_path = base_path + dataset + '_emotion_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)
tensorboard = TensorBoard(log_dir='./tf_graph', histogram_freq=0, write_graph=True, write_images=True)
trained_models_path = base_path + dataset + '_my_FeatCNN'
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1, save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr, tensorboard]
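# A hedged sketch of how this callbacks list would typically be consumed; the actual
# fit call is not part of the snippet, and model, batch_size and num_epochs are placeholders.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_faces, train_emotions,
          batch_size=batch_size,
          epochs=num_epochs,
          validation_split=0.2,   # supplies the val_loss / val_acc these callbacks monitor
          callbacks=callbacks)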
# Load model
emotion_classifier = tf.keras.models.load_model(emotion_model_path, compile=False)

# Generate hard data
faces = []
emotions = []
# ntrain_faces = train_faces[:50]
# ntrain_emotions = train_emotions[:50]
for i in range(len(train_faces)):  # For each face
    face = train_faces[i]  # train_faces is an array of all the training images
data_generator = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=.1,
    horizontal_flip=True)
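# A minimal sketch, assuming the generator above is used to make augmented copies of the
# current face inside the loop; the extra batch dimension is required by flow(), and a
# grayscale face may also need a trailing channel axis.
import numpy as np

face_batch = np.expand_dims(face, axis=0)                             # flow() expects a 4-D batch
augmented = next(data_generator.flow(face_batch, batch_size=1))[0]    # one randomly transformed copy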
def my_FeatCNN(input_shape, classes):
    padding = 'valid'
    img_input = keras.layers.Input(shape=input_shape)
    # START MODEL
    conv_1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding=padding, activation='relu', name='conv_1')(img_input)
    maxpool_1 = keras.layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_1)
    x = tf.keras.layers.BatchNormalization()(maxpool_1)
    # FEAT-EX1
def my_newCNN(input_shape, num_classes):
    model = tf.keras.Sequential()
    model.add(keras.layers.Conv2D(16, kernel_size=(3, 3), padding='same',
                                  name='image_array', input_shape=input_shape, activation='relu'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(16, kernel_size=(3, 3), padding='same',
                                  strides=(2, 2), activation='relu'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(.25))
    model.add(keras.layers.SeparableConv2D(32, kernel_size=(3, 3), padding='same',
def my_CNN(input_shape, num_classes):
    model = tf.keras.Sequential()
    model.add(keras.layers.Conv2D(5, kernel_size=(3, 3), padding='same',
                                  name='image_array', input_shape=input_shape, activation='relu'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(10, kernel_size=(3, 3), padding='same',
                                  strides=(2, 2), activation='relu'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(.25))
    model.add(keras.layers.Conv2D(16, kernel_size=(3, 3), padding='same',
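# The previews above cut off mid-definition; assuming each builder finishes by returning
# its model, a typical usage sketch would look like the following (the 48x48x1 input shape
# and 7 emotion classes are assumptions, not values taken from the snippets).
model = my_CNN(input_shape=(48, 48, 1), num_classes=7)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()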
bgr_image = video_capture.read()[1]
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
# faces = detect_faces(face_detection, gray_image)  <- their original Haar-based classifier code
result = detector.detect_faces(bgr_image)
if result != []:
    boundingbox = result[0]['box']
    # Their Haar-based classifier outputs a square bounding box, while the MTCNN detector doesn't.
    # So I take whichever of the width or height is smaller and expand it outwards to make the box square.
    # Because that can leave the box slightly off-center, I then shift it back to re-center it.
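    # A minimal sketch of the squaring step described above (not the author's exact code),
    # assuming MTCNN's box format of [x, y, width, height]:
    x, y, w, h = boundingbox
    side = max(w, h)                       # grow the smaller dimension up to the larger one
    x = x - (side - w) // 2                # shift left so the widened box stays centered
    y = y - (side - h) // 2                # shift up so the taller box stays centered
    x, y = max(x, 0), max(y, 0)            # keep the box inside the frame
    square_box = (x, y, side, side)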