import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)
# Parameters
learning_rate = 0.001
batch_size = 128
training_epochs = 5
n_classes = 10 # MNIST total classes (0-9 digits)
layer_width = {
    'layer_1': 32,
    'layer_2': 64,
    'layer_3': 128,
    'fully_connected': 512
}
# Store layers weight & bias
weights = {
    'layer_1': tf.Variable(tf.truncated_normal(
        [5, 5, 1, layer_width['layer_1']])),
    'layer_2': tf.Variable(tf.truncated_normal(
        [5, 5, layer_width['layer_1'], layer_width['layer_2']])),
    'layer_3': tf.Variable(tf.truncated_normal(
        [5, 5, layer_width['layer_2'], layer_width['layer_3']])),
    'fully_connected': tf.Variable(tf.truncated_normal(
        [4*4*128, layer_width['fully_connected']])),
    'out': tf.Variable(tf.truncated_normal(
        [layer_width['fully_connected'], n_classes]))
}
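# Note (added): the 4*4*128 flatten size above follows from three 'SAME'
# max-poolings of the 28x28 input: 28 -> 14 -> 7 -> ceil(7/2) = 4, with
# layer_3 producing 128 feature maps.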
biases = {
    'layer_1': tf.Variable(tf.zeros(layer_width['layer_1'])),
    'layer_2': tf.Variable(tf.zeros(layer_width['layer_2'])),
    'layer_3': tf.Variable(tf.zeros(layer_width['layer_3'])),
    'fully_connected': tf.Variable(tf.zeros(layer_width['fully_connected'])),
    'out': tf.Variable(tf.zeros(n_classes))
}
def conv2d(x, W, b, strides=1):
    # Convolution with 'SAME' padding, bias add, and tanh activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.tanh(x)
def maxpool2d(x, k=2):
    # k x k max-pooling with stride k and 'SAME' padding
    return tf.nn.max_pool(
        x,
        ksize=[1, k, k, 1],
        strides=[1, k, k, 1],
        padding='SAME')
# Create model
def conv_net(x, weights, biases):
    # Layer 1 - 28*28*1 to 14*14*32
    conv1 = conv2d(x, weights['layer_1'], biases['layer_1'])
    conv1 = maxpool2d(conv1)
    # Layer 2 - 14*14*32 to 7*7*64
    conv2 = conv2d(conv1, weights['layer_2'], biases['layer_2'])
    conv2 = maxpool2d(conv2)
    # Layer 3 - 7*7*64 to 4*4*128
    conv3 = conv2d(conv2, weights['layer_3'], biases['layer_3'])
    conv3 = maxpool2d(conv3)
    # Fully connected layer - 4*4*128 to 512
    # Reshape conv3 output to fit fully connected layer input
    fc1 = tf.reshape(
        conv3,
        [-1, weights['fully_connected'].get_shape().as_list()[0]])
    fc1 = tf.add(
        tf.matmul(fc1, weights['fully_connected']),
        biases['fully_connected'])
    fc1 = tf.nn.tanh(fc1)
    # Output Layer - class prediction - 512 to 10
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
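# Note (added): conv_net returns unscaled logits; the softmax is applied
# inside the cross-entropy loss below, so no activation is used on the
# output layer here.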
# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])
logits = conv_net(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)
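# Side note (added): the gist uses plain gradient descent;
# tf.train.AdamOptimizer is a common drop-in alternative for this
# tutorial-style network.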
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Display the loss on the last batch of each epoch
        c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
        print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(
        "Accuracy:",
        accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
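    # Added sketch (not in the original gist): if evaluating the full test set
    # in one run is too memory-hungry, the accuracy can instead be averaged
    # over mini-batches; batch_size here reuses the training batch size.
    test_batches = int(mnist.test.num_examples / batch_size)
    batched_acc = 0.0
    for _ in range(test_batches):
        test_x, test_y = mnist.test.next_batch(batch_size)
        batched_acc += accuracy.eval({x: test_x, y: test_y})
    print("Batched test accuracy:", batched_acc / test_batches)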