Skip to content

Instantly share code, notes, and snippets.

@matheushent
Last active January 29, 2019 14:45
Show Gist options
  • Save matheushent/9a32589d2d0beab53e7ab7c1b8aea849 to your computer and use it in GitHub Desktop.
Neural network (3-hidden-layer MLP in TensorFlow 1.x) for handwritten digit classification on MNIST
# Download (if needed) and load the MNIST dataset into ./mnist/ with
# one-hot encoded labels.
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x —
# this script requires TensorFlow 1.x.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist/', one_hot=True)
# Flattened image vectors and their one-hot label vectors
# (images reshape to 28x28 later, so each row is presumably 784 floats).
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
import matplotlib.pyplot as plt
import numpy as np
# Disabled sanity-check snippet (string literal, never executed):
# shows one training image with its decoded class label.
'''
plt.imshow(X_train[27].reshape(28, 28))
plt.title('Classe: ' + str(np.argmax(y_train[27])))
plt.show()
'''
# Layer sizing: the input width is the number of pixel features, the output
# width is the number of classes, and all three hidden layers share the
# mean of those two sizes.
in_neuron = X_train.shape[1]
out_neuron = y_train.shape[1]
hidden_neuron1 = (X_train.shape[1] + y_train.shape[1]) // 2
hidden_neuron2 = hidden_neuron1
hidden_neuron3 = hidden_neuron1

import tensorflow as tf

# Randomly-initialized parameters for the input -> hidden x3 -> output stack.
_layer_names = ('hidden1', 'hidden2', 'hidden3', 'out')
_layer_dims = [in_neuron, hidden_neuron1, hidden_neuron2, hidden_neuron3, out_neuron]
W = {
    name: tf.Variable(tf.random_normal([fan_in, fan_out]))
    for name, fan_in, fan_out in zip(_layer_names, _layer_dims[:-1], _layer_dims[1:])
}
b = {
    name: tf.Variable(tf.random_normal([size]))
    for name, size in zip(_layer_names, _layer_dims[1:])
}

# Feed targets: a batch of flattened images and its one-hot labels.
xph = tf.placeholder('float', [None, in_neuron])
yph = tf.placeholder('float', [None, out_neuron])
def mlp(x, W, bias):
    """Forward pass of a three-hidden-layer perceptron.

    Applies three ReLU-activated dense layers and one final linear layer,
    returning raw logits (softmax is applied downstream by the loss and
    prediction ops, not here).

    Args:
        x: input batch tensor of shape [batch, in_neuron].
        W: dict of weight matrices keyed 'hidden1', 'hidden2', 'hidden3', 'out'.
        bias: dict of bias vectors with the same keys.

    Returns:
        Logits tensor of shape [batch, out_neuron].
    """
    h = x
    for layer in ('hidden1', 'hidden2', 'hidden3'):
        h = tf.nn.relu(tf.matmul(h, W[layer]) + bias[layer])
    # Output layer stays linear: downstream ops expect unnormalized logits.
    return tf.matmul(h, W['out']) + bias['out']
# Assemble the rest of the graph: logits, loss, training step, and metrics.
model = mlp(xph, W, b)

# Mean softmax cross-entropy over the batch, minimized with Adam.
_LEARNING_RATE = 1e-4
error = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=yph))
optimizer = tf.train.AdamOptimizer(learning_rate=_LEARNING_RATE).minimize(error)

# Class probabilities, hard predictions, and batch accuracy.
predictions = tf.nn.softmax(model)
predicted_numerations = tf.argmax(predictions, 1)
correct_predictions = tf.equal(predicted_numerations, tf.argmax(yph, 1))
hit_rate = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Last measured training-batch accuracy; pre-initialized so the name is
    # always bound even if the loop body were to change.
    acc = 0.0
    # Mini-batch training: up to 5000 Adam steps of 128 examples each.
    # Every 100 steps, report progress and stop early once the
    # training-batch accuracy reaches 95%.
    for epoch in range(5000):
        X_batch, y_batch = mnist.train.next_batch(128)
        _, cost = sess.run([optimizer, error],
                           feed_dict={xph: X_batch, yph: y_batch})
        if epoch % 100 == 0:
            acc = sess.run(hit_rate, feed_dict={xph: X_batch, yph: y_batch})
            print('Epoch: ' + str((epoch + 1)) + ' -> Error: ' + str(cost) + ' -> Acc: ' + str(acc))
            if acc >= 0.95:
                break
    print('Training finished.')

    # Evaluation on the held-out test set.
    print(sess.run(predictions, feed_dict={xph: X_test}))
    print(sess.run(predicted_numerations, feed_dict={xph: X_test}))
    print(sess.run(correct_predictions, feed_dict={xph: X_test, yph: y_test}))
    # BUG FIX: the original printed the stale training-batch accuracy here;
    # report accuracy on the test set instead.
    test_acc = sess.run(hit_rate, feed_dict={xph: X_test, yph: y_test})
    print(test_acc)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment