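# Train a three-layer fully connected network on MNIST with TensorFlow 1.x,
# using plain gradient descent and a deliberately small learning rate.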
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

train_images = mnist.train.images
train_labels = mnist.train.labels
test_images = mnist.test.images
test_labels = mnist.test.labels
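# The reader yields 55,000 training and 10,000 test examples; each image is
# flattened to 784 floats and each label is a one-hot vector of length 10.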
graph = tf.Graph()
with graph.as_default():
    input = tf.placeholder(tf.float32, shape=(None, 784))
    labels = tf.placeholder(tf.float32, shape=(None, 10))

    # Add our three layers
    layer1_weights = tf.Variable(tf.random_normal([784, 500]))
    layer1_bias = tf.Variable(tf.zeros([500]))
    layer1_output = tf.nn.relu(tf.matmul(input, layer1_weights) + layer1_bias)

    layer2_weights = tf.Variable(tf.random_normal([500, 500]))
    layer2_bias = tf.Variable(tf.zeros([500]))
    layer2_output = tf.nn.relu(tf.matmul(layer1_output, layer2_weights) + layer2_bias)

    layer3_weights = tf.Variable(tf.random_normal([500, 10]))
    layer3_bias = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(layer2_output, layer3_weights) + layer3_bias

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    # Use a smaller learning rate
    learning_rate = 0.0001
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

    predictions = tf.nn.softmax(logits)
    correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
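# Note: tf.random_normal defaults to stddev=1.0, which produces large initial
# logits; that is likely why the learning rate above has to be so small. An
# alternative is to scale the initialization down instead. A minimal sketch,
# not part of the original gist:
#
#   layer1_weights = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
#
# With weights initialized this way, a much larger learning rate (e.g. 0.5)
# usually trains faster and more stably.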
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    num_steps = 5000
    batch_size = 100
    for step in range(num_steps):
        # Walk through the training set in order, wrapping around at the end
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_images = train_images[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {input: batch_images, labels: batch_labels}
        _, c, acc = session.run([optimizer, cost, accuracy], feed_dict=feed_dict)
        if step % 100 == 0:
            print("Cost: ", c)
            print("Accuracy: ", acc * 100.0, "%")
    # Test
    num_test_batches = int(len(test_images) / batch_size)
    total_accuracy = 0
    total_cost = 0
    for step in range(num_test_batches):
        # Step through the test set in batches
        offset = step * batch_size
        batch_images = test_images[offset:(offset + batch_size), :]
        batch_labels = test_labels[offset:(offset + batch_size), :]
        feed_dict = {input: batch_images, labels: batch_labels}
        # Evaluate only: running the optimizer here would train on the test set
        c, acc = session.run([cost, accuracy], feed_dict=feed_dict)
        total_cost = total_cost + c
        total_accuracy = total_accuracy + acc
    print("Test Cost: ", total_cost / num_test_batches)
    print("Test accuracy: ", total_accuracy * 100.0 / num_test_batches, "%")