# detect-hands.py: detecting hands
import os
import sys
import warnings
import random
import numpy as np
import pandas as pd
from keras import backend as K
from keras.models import Sequential
from keras.layers import Lambda, ZeroPadding2D, Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.preprocessing import image
from keras.callbacks import History, Callback
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
from vgg16 import VGG16, add_conv_block, VGG_preprocess
from ipywidgets import interact
from matplotlib import pyplot as plt
%matplotlib notebook
data_dir = "../../hands_data/hands_motion_mini"
INPUT_DIMENSIONS = (3, 240, 426)  # channels-first: (channels, height, width)
# data_dir = "../../hands_data/outdoor_1_cropped"
# INPUT_DIMENSIONS = (3,360,640)
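# The compile() call in HandTracker() below references mean_squared_error_modified,
# which is not defined anywhere in this gist (it presumably lived in another
# notebook cell). A minimal stand-in, assuming it is a plain per-pixel MSE on
# the predicted heatmap:
def mean_squared_error_modified(y_true, y_pred):
    # Standard mean squared error; swap in the original modified loss if you
    # have it.
    return K.mean(K.square(y_pred - y_true), axis=-1)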
def HandTracker(weights_file=None):
    model = Sequential()
    model.add(Lambda(VGG_preprocess, input_shape=INPUT_DIMENSIONS, output_shape=INPUT_DIMENSIONS))
    # With all the commented-out blocks enabled this would be the full VGG16
    # convolutional stack; only the first two blocks are used here.
    add_conv_block(model, 2, 64)
    add_conv_block(model, 2, 128)
    # add_conv_block(model, 3, 256)
    # add_conv_block(model, 3, 512)
    # add_conv_block(model, 3, 512)
    # Copy the pretrained VGG16 weights into the matching layers and freeze them.
    vgg = VGG16()
    for model_layer, vgg_layer in zip(model.layers, vgg.layers):
        model_layer.set_weights(vgg_layer.get_weights())
        model_layer.trainable = False
    # A new, trainable convolution block on top of the frozen VGG features.
    fils = 512
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(fils, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(fils, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(fils, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # model.add(ZeroPadding2D((1,1)))
    # model.add(Conv2D(512, (3, 3), activation='relu'))
    # model.add(ZeroPadding2D((1,1)))
    # model.add(Conv2D(512, (3, 3), activation='relu'))
    # model.add(ZeroPadding2D((1,1)))
    # model.add(Conv2D(512, (3, 3), activation='relu'))
    # model.add(MaxPooling2D((2,2), strides=(2,2)))
    # A 1x1 convolution collapses the features to a single-channel heatmap.
    model.add(Conv2D(1, (1, 1)))
    model.compile(optimizer=Adam(lr=0.001), loss=mean_squared_error_modified, metrics=['accuracy'])
    # Use the previously unused weights_file argument to restore saved weights.
    if weights_file is not None:
        model.load_weights(weights_file)
    return model
model = HandTracker()
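# Optional sanity check (hypothetical, not in the original): assuming
# add_conv_block ends each block with a 2x2 max-pool as in standard VGG, the
# three poolings shrink the 240x426 input to a (None, 1, 30, 53) heatmap.
# print(model.layers[-1].output_shape)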
BATCH_SIZE = 32
imageDataGen = image.ImageDataGenerator(
    horizontal_flip=True,
    # zoom_range=(2.0, 4.0),
    # width_shift_range=0.2,
    # height_shift_range=0.2,
    fill_mode='constant'
)
# Spatial (height, width) of the network's output heatmap; targets are resized to match it.
net_output_size = model.layers[-1].output_shape[2:]
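# Both iterators below share imageDataGen and a fixed seed, and shuffling is
# off, so each input frame stays paired with its target heatmap even when a
# random horizontal flip is applied.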
frames = imageDataGen.flow_from_directory(data_dir + "/frames",
                                          batch_size=BATCH_SIZE,
                                          target_size=INPUT_DIMENSIONS[1:],
                                          shuffle=False, seed=0)
targets = imageDataGen.flow_from_directory(data_dir + "/hand",
                                           batch_size=BATCH_SIZE,
                                           target_size=net_output_size,
                                           color_mode='grayscale',
                                           shuffle=False, seed=0)
def zip_batches(input_batches, output_batches):
    # Wrap one or more Keras directory iterators into a single generator that
    # yields (inputs, outputs) tuples of image arrays, dropping the class labels.
    if not isinstance(input_batches, list):
        input_batches = [input_batches]
    if not isinstance(output_batches, list):
        output_batches = [output_batches]
    while True:
        yield ([next(b)[0] for b in input_batches],
               [next(b)[0] for b in output_batches])
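# Quick smoke test (hypothetical, not in the original): pull one synchronized
# batch and check that input and target batch sizes line up. Left commented
# out because it advances both iterators.
# xs, ys = next(zip_batches(frames, targets))
# print(xs[0].shape, ys[0].shape)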
training_stream = zip_batches(frames, targets)
model.fit_generator(training_stream,
                    steps_per_epoch=(frames.samples // frames.batch_size) + 1,
                    epochs=8)
model.save_weights("model_weights.h5")
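# To reuse the trained network later (standard Keras load_weights API), rebuild
# the architecture and restore the saved weights; HandTracker() accepts a
# weights file for exactly this:
# model = HandTracker("model_weights.h5")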
# Pull one fresh batch for visualization. Note that this rebinds `targets`
# from the directory iterator to the list of target arrays.
(inputs, targets) = next(training_stream)
predicted = model.predict(inputs[0])
plt.ion()
fig = plt.figure()
top = fig.add_subplot(3,1,1)
mid = fig.add_subplot(3,1,2)
bottom = fig.add_subplot(3,1,3)
def vis(im, fil):
    # Show the prediction (top), input frame (middle) and ground-truth heatmap
    # (bottom) for sample `im` of the batch; `fil` is currently unused.
    pred_im = predicted[im].transpose(1, 2, 0).squeeze()
    top.imshow(pred_im, vmin=0, vmax=255)
    inp_im = inputs[0][im, :, :, :].transpose((1, 2, 0))
    mid.imshow(inp_im)
    target_im = targets[0][im].transpose(1, 2, 0).squeeze()
    bottom.imshow(target_im, vmin=0, vmax=255)
    plt.show()
interact(vis, im=(0,BATCH_SIZE-1), fil=(0,255))