Skip to content

Instantly share code, notes, and snippets.

@sbarratt
Created August 15, 2016 17:15
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save sbarratt/1853e173b97b5d6d993162707fe4fa0b to your computer and use it in GitHub Desktop.
"""CNN from https://www.microsoft.com/en-us/research/wp-content/uploads/2003/08/icdar03.pdf"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
def weight_variable(shape):
    """Return a trainable weight tensor of `shape`, drawn from N(0, 0.05)."""
    return tf.Variable(tf.random_normal(shape, stddev=0.05))
def bias_variable(shape):
    """Return a trainable bias tensor of `shape`, filled with 0.1."""
    init = tf.constant(0.1, shape=shape)
    return tf.Variable(init)
def conv2d(x, W, strides=(1, 2, 2, 1)):
    """2-D convolution with VALID (no) padding.

    Args:
        x: input tensor in NHWC layout.
        W: filter tensor, shape [height, width, in_channels, out_channels].
        strides: per-dimension strides; defaults to the paper's stride of 2
            in both spatial dimensions (generalized from the hard-coded
            original so other stride patterns can reuse this helper).

    Returns:
        The convolved tensor.
    """
    return tf.nn.conv2d(x, W, strides=list(strides), padding='VALID')
# --- Network graph ----------------------------------------------------------
# Inputs: flattened 28x28 MNIST images; labels one-hot over 10 classes.
x = tf.placeholder(tf.float32, [None, 28 * 28])
y_ = tf.placeholder(tf.float32, [None, 10])

# Reshape to NHWC and resize to 29x29 so two stride-2 VALID 5x5 convolutions
# produce 13x13 then 5x5 feature maps, matching the ICDAR'03 architecture.
_x = tf.reshape(x, [-1, 28, 28, 1])
x_image = tf.image.resize_bilinear(_x, (29, 29))

# Conv layer 1: 5 feature maps, sigmoid activation. 29x29 -> 13x13.
W_conv1 = weight_variable([5, 5, 1, 5])
b_conv1 = bias_variable([5])
h_1 = tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1)

# Conv layer 2: 50 feature maps. 13x13 -> 5x5.
W_conv2 = weight_variable([5, 5, 5, 50])
b_conv2 = bias_variable([50])
h_2 = tf.nn.sigmoid(conv2d(h_1, W_conv2) + b_conv2)

# Fully connected head: 5*5*50 -> 100 (sigmoid) -> 10 (softmax).
h_2_flattened = tf.reshape(h_2, [-1, 5 * 5 * 50])
W_3 = tf.Variable(tf.random_normal([5 * 5 * 50, 100], stddev=.05))
b_3 = tf.Variable(tf.random_normal([100], stddev=.05))
h_3 = tf.nn.sigmoid(tf.matmul(h_2_flattened, W_3) + b_3)

W_4 = tf.Variable(tf.random_normal([100, 10], stddev=.05))
b_4 = tf.Variable(tf.random_normal([10], stddev=.05))
logits = tf.matmul(h_3, W_4) + b_4
y = tf.nn.softmax(logits)

# BUG FIX: the original loss, -sum(y_ * log(y)), returns NaN as soon as any
# softmax output underflows to exactly 0. The fused op below computes the
# same cross-entropy from the raw logits in a numerically stable way.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# --- Training hyper-parameters ----------------------------------------------
# Number of training examples (whatever split the loader produced).
num_train_images = mnist.train.images.shape[0]
num_epochs = 1000
minibatch_size = 10
# Initial SGD learning rate; multiplied by `alpha` every 100 epochs in the
# training loop below.
learning_rate = 0.005
alpha = 0.3
# The learning rate is fed through a scalar placeholder so the Python-side
# value can change between steps without rebuilding the graph.
learning_rate_ = tf.placeholder(tf.float32, shape=[])
train_step = tf.train.GradientDescentOptimizer(learning_rate_).minimize(cross_entropy)
# Every epoch must cover the training set exactly once, so the set must
# split evenly into minibatches.
assert num_train_images % minibatch_size == 0

with tf.Session() as sess:
    # FIX: tf.initialize_all_variables() is deprecated; use its replacement.
    sess.run(tf.global_variables_initializer())
    for epoch in range(num_epochs):
        # One full pass over the training set.
        for _ in range(num_train_images // minibatch_size):
            batch = mnist.train.next_batch(minibatch_size)
            train_step.run(feed_dict={x: batch[0],
                                      y_: batch[1],
                                      learning_rate_: learning_rate})
        # Decay the learning rate by `alpha` every 100 epochs.
        if epoch > 0 and epoch % 100 == 0:
            learning_rate *= alpha
        print("epoch %d" % epoch)
        validation_accuracy = accuracy.eval(
            feed_dict={x: mnist.validation.images,
                       y_: mnist.validation.labels})
        print("validation accuracy %g" % validation_accuracy)
    # Held-out test evaluation after all training epochs.
    test_accuracy = accuracy.eval(
        feed_dict={x: mnist.test.images, y_: mnist.test.labels})
    print("test accuracy %g" % test_accuracy)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment