@bravo325806
Created March 27, 2019 13:43
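
# TensorFlow 1.x MNIST digit classifier: builds a small CNN, restores a
# checkpoint from mnist_out/, and classifies sample digit images from disk.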
import os
import time
from random import shuffle

import cv2
import numpy as np
import tensorflow as tf


class mnist(object):
    learning_rate = 0.001
    input_node_name = 'input'
    output_node_name = 'output'
    num_classes = 10
    # Note: these class-level lists are shared across all instances.
    train_set = []
    test_set = []

    def __init__(self, is_training=True):
        # Placeholders with a fixed batch size of 1, matching the inference loop below.
        self.x = tf.placeholder(dtype=tf.float32, shape=[1, 28, 28, 3], name=self.input_node_name)
        self.y = tf.placeholder(dtype=tf.float32, shape=[1, self.num_classes])
        self.get_list()
        self.network()
        self.train()
        self.summary()
        self.saver = tf.train.Saver()
        self.init = tf.global_variables_initializer()
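
    # Simple CNN: three conv/max-pool blocks (64 -> 128 -> 256 filters),
    # a 1024-unit fully connected layer, and a 10-way softmax output.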
    def network(self):
        conv_1 = tf.layers.conv2d(inputs=self.x, filters=64, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        pool_1 = tf.layers.max_pooling2d(inputs=conv_1, pool_size=[2, 2], strides=2)
        conv_2 = tf.layers.conv2d(inputs=pool_1, filters=128, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        pool_2 = tf.layers.max_pooling2d(inputs=conv_2, pool_size=[2, 2], strides=2)
        conv_3 = tf.layers.conv2d(inputs=pool_2, filters=256, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        pool_3 = tf.layers.max_pooling2d(inputs=conv_3, pool_size=[2, 2], strides=2, padding='same')
        flatten = tf.layers.flatten(pool_3)
        fully = tf.layers.dense(flatten, 1024, activation=tf.nn.relu)
        self.logits = tf.layers.dense(fully, self.num_classes)
        self.outputs = tf.nn.softmax(self.logits, name=self.output_node_name)
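
    # Cross-entropy loss with an Adam optimizer. The "test" loss/accuracy
    # are the same graph ops as the training ones; they exist only so the
    # summaries below can log train and test values under separate tags.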
    def train(self):
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        self.correct_pred = tf.equal(tf.argmax(self.outputs, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
        self.test_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))
        self.test_accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

    def summary(self):
        summary_train_loss = tf.summary.scalar(name="train", tensor=self.loss, family="loss")
        summary_train_accuracy = tf.summary.scalar(name="train", tensor=self.accuracy, family="accuracy")
        summary_test_loss = tf.summary.scalar(name="test", tensor=self.test_loss, family="loss")
        summary_test_accuracy = tf.summary.scalar(name="test", tensor=self.test_accuracy, family="accuracy")
        self.merged_summary_train_op = tf.summary.merge([summary_train_loss, summary_train_accuracy])
        self.merged_summary_test_op = tf.summary.merge([summary_test_loss, summary_test_accuracy])
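
    # Build the training list by walking ./trainingSet/<digit>/; the
    # directory name doubles as the class label.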
    def get_list(self):
        for root, dirs, files in os.walk('./trainingSet'):
            for file in files:
                label = root.split('/')[-1]
                dic = {'label': label, 'file': root + "/" + file}
                self.train_set.append(dic)
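
    # Infinite generator: reshuffle the training list each epoch and yield
    # (images, one-hot labels) batches of `batch_size`.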
    def get_train_image(self, batch_size=64):
        batch_features = []
        labels = []
        while True:
            shuffle(self.train_set)
            for data in self.train_set:
                image = cv2.imread(data['file'], cv2.IMREAD_COLOR)
                resize_image = cv2.resize(image, (28, 28), interpolation=cv2.INTER_CUBIC)
                b, g, r = cv2.split(resize_image)
                rgb_img = cv2.merge([r, g, b])  # OpenCV loads BGR; reorder to RGB
                rgb_img = rgb_img / 255.0
                batch_features.append(rgb_img)
                label = self.dense_to_one_hot(int(data['label']), self.num_classes)
                labels.append(label)
                if len(batch_features) >= batch_size:
                    yield np.array(batch_features), np.array(labels)
                    # Reset the buffers so batches do not keep growing.
                    batch_features = []
                    labels = []
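
    # Same preprocessing as get_train_image, but for unlabeled test images
    # sampled from ./testSample/.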
    def get_test_image(self, batch_size=64):
        batch_features = []
        files = os.listdir("./testSample/")
        while True:
            shuffle(files)
            for file in files:
                image = cv2.imread("./testSample/" + file, cv2.IMREAD_COLOR)
                resize_image = cv2.resize(image, (28, 28), interpolation=cv2.INTER_CUBIC)
                b, g, r = cv2.split(resize_image)
                rgb_img = cv2.merge([r, g, b])
                rgb_img = rgb_img / 255.0
                batch_features.append(rgb_img)
                if len(batch_features) >= batch_size:
                    yield np.array(batch_features)
                    batch_features = []

    def dense_to_one_hot(self, labels_dense, num_classes=10):
        # Row `labels_dense` of the identity matrix is the one-hot vector.
        return np.eye(num_classes)[labels_dense]


MODEL_NAME = 'mnist'
batch_size = 1
iters = 1

mnist_net = mnist(is_training=True)

with tf.Session() as sess:
    sess.run(mnist_net.init)
    mnist_net.saver.restore(sess, "mnist_out/mnist")
    # Create the generator once, outside the loop, so it is not rebuilt
    # (and the file list reshuffled) on every step.
    test_batch = mnist_net.get_test_image(batch_size)
    step = 0
    while step < iters:
        batch_x = next(test_batch)
        net_output = sess.run(mnist_net.outputs, feed_dict={mnist_net.x: batch_x})
        # Report the most probable class for the first image in the batch.
        index = int(np.argmax(net_output[0]))
        result = float(net_output[0][index])
        print("label:", index, "confidence:", result)
        step += 1
    mnist_net.saver.save(sess, 'output/' + MODEL_NAME)
# print("Finish")
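
# The explicitly named input/output nodes ('input', 'output') suggest this
# graph is meant to be frozen for deployment. A minimal sketch of that step,
# assuming the checkpoint written to output/mnist above (the frozen file
# name is illustrative):
#
#     with tf.Session() as sess:
#         mnist_net.saver.restore(sess, 'output/' + MODEL_NAME)
#         frozen = tf.graph_util.convert_variables_to_constants(
#             sess, sess.graph_def, [mnist_net.output_node_name])
#         tf.train.write_graph(frozen, 'output',
#                              MODEL_NAME + '_frozen.pb', as_text=False)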