Last active
January 5, 2019 06:07
-
-
Save atinesh-s/facaec2521f7cf9eb927ee2942337092 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from preprocess.normalize import normalize_image, resize_image, crop_center, preprocess_signature | |
import tensorflow.contrib.slim as slim | |
import skimage.transform | |
import tensorflow as tf | |
import skimage.data | |
import numpy as np | |
import glob | |
import cv2 | |
import os | |
def build_model(input_var):
    """Build the network graph: two conv+pool stages, then a 2048-unit
    fully connected layer with a ReLU on top.

    Args:
        input_var: input image tensor/array fed to the first conv layer
            (presumably NHWC — TODO confirm against the data pipeline).

    Returns:
        The ReLU-activated output tensor of the fully connected layer.
    """
    net = conv_bn(input_var, 'conv1', num_outputs=96, kernel_size=11, stride=4)
    net = slim.max_pool2d(net, 3, 2, scope='pool1')
    net = conv_bn(net, 'conv2', num_outputs=256, kernel_size=5, padding='SAME')
    net = slim.max_pool2d(net, 3, 2, scope='pool2')
    net = slim.flatten(net)
    net = slim.fully_connected(net, 2048, scope='fc1')
    return tf.nn.relu(net)
def conv_bn(input, scope, num_outputs, kernel_size, stride=1, padding='VALID'):
    """2-D convolution with ReLU activation and no bias term.

    Weights are initialized from a truncated normal (stddev 0.01).
    NOTE(review): despite the name, no batch norm is applied here — the
    layer is convolution + ReLU only.  The ``input`` parameter name
    shadows the builtin but is kept for caller compatibility.

    Returns:
        The output tensor of ``slim.conv2d``.
    """
    layer_kwargs = dict(
        num_outputs=num_outputs,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        scope=scope,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        biases_initializer=None,  # bias disabled
        activation_fn=tf.nn.relu,
    )
    return slim.conv2d(input, **layer_kwargs)
def load_data(data_dir):
    """Load labelled PNG images from ``data_dir``.

    Expects one subdirectory per class, named with the integer class
    label; every ``.png`` file inside a subdirectory belongs to that
    class.

    Args:
        data_dir: path to the dataset root.

    Returns:
        images: float32 ndarray, each image resized to 150x220.
            NOTE(review): trailing channel count depends on the source
            PNGs — confirm they are single-channel signature scans.
        labels: int32 ndarray of shape (N,), class label per image.
    """
    # Sort for a deterministic dataset ordering (os.listdir order is
    # filesystem-dependent).
    class_dirs = sorted(
        d for d in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, d))
    )
    list_images = []
    list_labels = []
    for d in class_dirs:
        label_dir = os.path.join(data_dir, d)
        for f in sorted(os.listdir(label_dir)):
            if not f.endswith(".png"):
                continue
            list_images.append(skimage.data.imread(os.path.join(label_dir, f)))
            list_labels.append(int(d))  # directory name is the label
    # Resize every image to a fixed 150x220 so they stack into one array.
    resized = [skimage.transform.resize(image, (150, 220)) for image in list_images]
    # BUG FIX: the original piped the list through tf.cast, producing a
    # graph Tensor, and then wrapped that Tensor with np.array — which
    # yields a 0-d object array, not image data.  Cast with NumPy directly.
    images = np.asarray(resized, dtype=np.float32)
    labels = np.asarray(list_labels, np.int32)
    return images, labels
def main(argv):
    """Train the model on data2/train for 20 steps using TF-Slim.

    Args:
        argv: command-line arguments forwarded by ``tf.app.run``
            (unused).  The original misnamed this parameter ``self``;
            ``tf.app.run`` passes it positionally, so renaming is safe.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    model_path = 'model'
    train_path = 'data2/train'
    test_path = 'data2/test'
    train_data, train_labels = load_data(train_path)
    test_data, test_labels = load_data(test_path)
    print(train_data)
    print(train_labels)
    print(test_data)
    print(test_labels)
    predictions = build_model(train_data)
    # BUG FIX: slim.one_hot_encoding requires num_classes; the original
    # omitted it and would raise a TypeError.  Derive it from the labels.
    # NOTE(review): build_model outputs 2048 units — for the cross
    # entropy below, the logits dimension must equal num_classes, so the
    # architecture likely needs a final num_classes-unit layer; confirm.
    num_classes = int(np.max(train_labels)) + 1
    one_hot_labels = slim.one_hot_encoding(train_labels, num_classes)
    slim.losses.softmax_cross_entropy(predictions, one_hot_labels)
    total_loss = slim.losses.get_total_loss()
    # BUG FIX: summary tags may not contain spaces; 'losses/Total Loss'
    # is rejected by tf.summary name validation.
    tf.summary.scalar('losses/total_loss', total_loss)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)
    final_loss = slim.learning.train(train_op,
                                     logdir=model_path,
                                     number_of_steps=20,
                                     save_summaries_secs=5)
# Script entry point: tf.app.run() parses TF flags, then calls main(argv).
if __name__ == '__main__':
    tf.app.run()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment