{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from glob import glob\n",
"from PIL import Image\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"from keras.applications.vgg19 import VGG19, preprocess_input\n",
"from keras.callbacks import ModelCheckpoint\n",
"from keras.models import Model\n",
"from keras.layers import Flatten, Dense, Dropout, GlobalMaxPooling2D\n",
"from keras.utils import to_categorical\n",
"from keras_preprocessing.image import ImageDataGenerator"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gender_to_index = {0: 'male', 1: 'female'}\n",
"race_to_index = {0: 'white', 1: 'black', 2: 'asian', 3: 'indian', 4: 'others_hispanic_middle_eastern'}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"filenames, ages, genders, races = [], [], [], []\n",
"count = 0\n",
"for filename in glob('/home/rodrigo/.keras/datasets/UTKFace/*.jpg'):\n",
" try:\n",
" age, gender, race = (int(v) for v in filename.split('UTKFace/')[1].split('_')[:3])\n",
" except ValueError:\n",
" count += 1\n",
" else:\n",
" filenames.append(filename), ages.append(age), genders.append(gender), races.append(race)\n",
" \n",
"if count:\n",
" print(f'Found {count} not conforming filenames')\n",
" \n",
"faces = pd.DataFrame({\n",
" 'filename': filenames,\n",
" 'age': ages,\n",
" 'gender': genders,\n",
" 'race': races\n",
"})\n",
"del filenames, ages, genders, races\n",
"faces.head()"
]
},
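{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check before training: plot the label distributions with the already-imported `matplotlib`. This is an illustrative sketch assuming the `faces` DataFrame built above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: inspect the label distributions before training\n",
"fig, axes = plt.subplots(1, 3, figsize=(15, 3))\n",
"faces['age'].hist(bins=50, ax=axes[0])\n",
"axes[0].set_title('age')\n",
"faces['gender'].value_counts().plot.bar(ax=axes[1], title='gender')\n",
"faces['race'].value_counts().plot.bar(ax=axes[2], title='race')\n",
"plt.tight_layout()"
]
},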
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"factor = 2\n",
"\n",
"w, h = Image.open(faces['filename'].iloc[0]).size\n",
"H = h // factor\n",
"W = w // factor\n",
"print(H, W)"
]
},
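{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative check: display one sample face at the downscaled training resolution, assuming the `W`, `H` values computed above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: one sample face at the (W, H) training resolution\n",
"plt.imshow(Image.open(faces['filename'].iloc[0]).resize((W, H)))\n",
"plt.axis('off')"
]
},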
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 128\n",
"gen = ImageDataGenerator(\n",
" horizontal_flip=True,\n",
" rotation_range=10,\n",
" preprocessing_function=preprocess_input,\n",
" validation_split=0.2\n",
")\n",
"\n",
"faces_train_gen = gen.flow_from_dataframe(\n",
" faces,\n",
" y_col='age',\n",
" class_mode='raw',\n",
" target_size=(H, W),\n",
" batch_size=BATCH_SIZE,\n",
" subset='training'\n",
")\n",
"\n",
"faces_val_gen = gen.flow_from_dataframe(\n",
" faces,\n",
" y_col='age',\n",
" class_mode='raw',\n",
" target_size=(H, W),\n",
" batch_size=BATCH_SIZE,\n",
" subset='validation'\n",
")"
]
},
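{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative check on the generators: pull one batch and verify the shapes and the value range produced by VGG19's `preprocess_input`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: one batch from the training generator\n",
"batch_x, batch_y = next(faces_train_gen)\n",
"print(batch_x.shape)  # (BATCH_SIZE, H, W, 3)\n",
"print(batch_y.shape)  # (BATCH_SIZE,) raw age targets\n",
"print(batch_x.min(), batch_x.max())  # range after preprocess_input"
]
},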
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"base_model = VGG19(include_top=False, weights='imagenet', input_shape=(H, W, 3))\n",
"for layer in base_model.layers:\n",
" layer.trainable=False\n",
"\n",
"x = GlobalMaxPooling2D()(base_model.output)\n",
"x = Dense(64, activation='relu')(x) \n",
"age_prediction = Dense(1)(x)\n",
"\n",
"model = Model(inputs=base_model.input, outputs=age_prediction) \n",
"model.compile(optimizer='Adam',loss='mean_squared_error',metrics=['mean_absolute_error'])\n",
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_steps = faces_train_gen.n // BATCH_SIZE // 8\n",
"val_steps = faces_val_gen.n // BATCH_SIZE // 8\n",
"\n",
"model.fit_generator(\n",
" faces_train_gen,\n",
" epochs=1,\n",
" steps_per_epoch=train_steps,\n",
" validation_data=faces_val_gen,\n",
" validation_steps=val_steps,\n",
" max_queue_size=10,\n",
" callbacks=[ModelCheckpoint('age_regression.h5', save_best_only=True)]\n",
")"
]
},
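{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative follow-up: evaluate the just-trained regressor on the validation generator; the mean absolute error is directly interpretable in years."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: validation MAE of the age regressor, in years\n",
"loss, mae = model.evaluate_generator(faces_val_gen, steps=val_steps)\n",
"print(f'validation MAE: {mae:.1f} years')"
]
},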
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### multi-task"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"onehot_races = to_categorical(faces['race'].values)\n",
"faces['onehot_races'] = onehot_races.tolist()\n",
"faces.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 128\n",
"\n",
"faces_train_gen = gen.flow_from_dataframe(\n",
" faces,\n",
" y_col=['age', 'gender', 'onehot_races'],\n",
" class_mode='multi_output',\n",
" target_size=(H, W),\n",
" batch_size=BATCH_SIZE,\n",
" subset='training'\n",
")\n",
"\n",
"faces_val_gen = gen.flow_from_dataframe(\n",
" faces,\n",
" y_col=['age', 'gender', 'onehot_races'],\n",
" class_mode='multi_output',\n",
" target_size=(H, W),\n",
" batch_size=BATCH_SIZE,\n",
" subset='validation'\n",
")"
]
},
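{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative check: with `class_mode='multi_output'` a batch carries a list of three target arrays, one per column in `y_col`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: multi_output yields [ages, genders, one-hot races]\n",
"batch_x, batch_y = next(faces_train_gen)\n",
"print([np.asarray(t).shape for t in batch_y])"
]
},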
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = GlobalMaxPooling2D()(base_model.output)\n",
"x = Dense(64, activation='relu')(x) \n",
"\n",
"age_prediction = Dense(1, name='age_prediction')(x)\n",
"gender_prediction = Dense(1, activation='sigmoid')(x)\n",
"race_prediction = Dense(5, activation='softmax')(x)\n",
"\n",
"model = Model(inputs=base_model.input, outputs=[age_prediction, gender_prediction, race_prediction]) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.compile(\n",
" optimizer='Adam',\n",
" loss=['mean_squared_error', 'binary_crossentropy', 'categorical_crossentropy'],\n",
" metrics={'age_prediction': 'mean_absolute_error'}\n",
")"
]
},
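{
"cell_type": "markdown",
"metadata": {},
"source": [
"The three losses live on different scales (squared error in years² versus cross-entropies), so the age loss can dominate the total. One option is to recompile with `loss_weights`; the weights below are an illustrative sketch, not tuned values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: balance the tasks with loss_weights (values not tuned)\n",
"model.compile(\n",
"    optimizer='Adam',\n",
"    loss=['mean_squared_error', 'binary_crossentropy', 'categorical_crossentropy'],\n",
"    loss_weights=[0.01, 1.0, 1.0],\n",
"    metrics={'age_prediction': 'mean_absolute_error'}\n",
")"
]
},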
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_steps = faces_train_gen.n // BATCH_SIZE\n",
"val_steps = faces_val_gen.n // BATCH_SIZE // 2\n",
"\n",
"model.fit_generator(\n",
" faces_train_gen,\n",
" epochs=100,\n",
" steps_per_epoch=train_steps,\n",
" validation_data=faces_val_gen,\n",
" validation_steps=val_steps,\n",
" max_queue_size=10,\n",
" callbacks=[ModelCheckpoint('age_multi-task.h5', save_best_only=True)]\n",
")"
]
},
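{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative sketch: predict all three targets for a single image, reusing the same resize and `preprocess_input` steps the generators apply."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative: single-image prediction with the multi-task model\n",
"img = Image.open(faces['filename'].iloc[0]).resize((W, H))\n",
"x_img = preprocess_input(np.array(img, dtype='float32'))\n",
"age, gender, race = model.predict(x_img[None, ...])\n",
"print('age:', int(age))\n",
"print('gender:', index_to_gender[round(float(gender))])\n",
"print('race:', index_to_race[int(np.argmax(race))])"
]
},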
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Live video stream"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"bgr_to_rgb = lambda x: cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n",
"face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index_to_gender = {0: 'male', 1: 'female'}\n",
"index_to_race = {0: 'white', 1: 'black', 2: 'asian', 3: 'indian', 4: 'others_hispanic_middle_eastern'}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from keras.models import load_model\n",
"from keras.applications.vgg19 import VGG19, preprocess_input"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def process_frame(frame, face_x_y_w_h, target_shape):\n",
" (x, y, w, h) = face_x_y_w_h\n",
" cv2.rectangle(frame, (x, y),(x + w, y + h), (255, 0, 0), 2)\n",
" face_img = frame[y: y + h, x: x + w]\n",
" face_img = cv2.resize(face_img, target_shape)\n",
" face_img = bgr_to_rgb(face_img)\n",
" face_img = preprocess_input(face_img)\n",
" return frame, face_img\n",
"\n",
"def process_face(face_img, model, index_to_gender, index_to_race):\n",
" preds = model.predict(face_img[None, ...])\n",
" if len(preds) == 1:\n",
" age = int(preds[0])\n",
" return {'Age': age}\n",
" else:\n",
" age, gender, race = preds\n",
" age = int(age)\n",
" gender = index_to_gender[round(float(gender))]\n",
" race = index_to_race[np.argmax(race)]\n",
" return {'Race': race, 'Gender': gender, 'Age': age}\n",
" \n",
"model = load_model('age_multi-task.h5')\n",
"model = load_model('age_regression.h5')\n",
"\n",
"font_size = 24\n",
"frame_rate = 50\n",
"delta_t_frame = 1000 // frame_rate\n",
"delta_t_frame\n",
"\n",
"CAMERA = cv2.VideoCapture(0)\n",
"cv2.namedWindow('Live stream')\n",
"\n",
"while True:\n",
" frame = CAMERA.read()[1]\n",
"\n",
" frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
" faces = face_cascade.detectMultiScale(frame_gray, scaleFactor=1.3, minNeighbors=5)\n",
" \n",
" for (x, y, w, h) in faces:\n",
" frame, face_image = process_frame(frame, (x, y, w, h), (100, 100))\n",
" predictions_dict = process_face(face_image, model, index_to_gender, index_to_race)\n",
" for i, (k, v) in enumerate(predictions_dict.items()):\n",
" cv2.addText(frame, f'{k}: {v}', (x, y - i * font_size - 10), nameFont='Times', pointSize=font_size, color=(255 , 0, 0))\n",
" cv2.imshow('Live stream', frame)\n",
" if cv2.waitKey(delta_t_frame) & 0xFF == ord('q'):\n",
" break\n",
"\n",
"CAMERA.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CAMERA.release()\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [default]",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}