Deep MNIST
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download (if needed) and load the MNIST dataset with one-hot labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def weight_variable(shape):
    # Initialize weights with a little noise to break symmetry.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # A slightly positive bias helps avoid "dead" ReLU neurons.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    # Stride 1 with SAME padding keeps the output the same size as the input.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# 784 pixels (28 x 28 pixel images), 10 possible outputs
x = tf.placeholder(tf.float32, [None, 784])  # input placeholder
W = tf.Variable(tf.zeros([784, 10]))  # weights
b = tf.Variable(tf.zeros([10]))  # biases
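# W and b above are the parameters of the tutorial's single-layer softmax
# regression model; the convolutional network below does not use them.
# For reference, that simpler model is just (y_simple is an illustrative
# name, not from the gist, and is unused here):
y_simple = tf.nn.softmax(tf.matmul(x, W) + b)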
# First convolutional layer: 32 features for each 5x5 patch
x_image = tf.reshape(x, [-1, 28, 28, 1])
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14
# Second convolutional layer: 64 features for each 5x5 patch
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
# Densely connected layer: after two rounds of pooling the image is
# 7x7 with 64 feature maps, flattened and fed into 1024 units
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout to prevent overfitting; keep_prob is a placeholder so dropout
# can be on during training (0.5) and off during evaluation (1.0)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Softmax readout layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# y_ holds the true label distribution (one-hot)
y_ = tf.placeholder(tf.float32, [None, 10])

# Use cross-entropy for the cost function
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
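# A numerically stabler alternative (a sketch; not wired into train_step
# below): feed the pre-softmax logits to tf.nn.softmax_cross_entropy_with_logits
# instead of computing log(softmax) by hand, which can hit log(0) = -inf
# once predictions become confident.
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))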
# Use the Adam optimizer on the cost function for training
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of examples whose predicted class matches the label
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize variables and start a session
# (tf.initialize_all_variables is the pre-1.0 name for
# tf.global_variables_initializer)
init = tf.initialize_all_variables()
sess = tf.InteractiveSession()
sess.run(init)
# Train for 20,000 steps on mini-batches of 50, reporting training
# accuracy (with dropout disabled) every 100 steps
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
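# Final evaluation on the held-out test set with dropout disabled
# (the usual closing step of the Deep MNIST tutorial; not in the gist)
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))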