import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

image_size = 28
n_labels = 10


def create_w_b(n_prev_layer, n_next_layer):
    """Create randomly initialized weight and bias variables for one layer."""
    w = tf.Variable(tf.random_normal([n_prev_layer, n_next_layer]))
    b = tf.Variable(tf.random_normal([n_next_layer]))
    return w, b


def accuracy(predictions, labels):
    """Percentage of rows whose argmax matches the one-hot label's argmax."""
    return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]


def main():
    start_learn_rate = 0.01
    reg = 0.001      # L2 regularization strength
    dropout1 = 0.9   # keep probabilities for the dropout layers, currently commented out
    dropout2 = 1.0

    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, [None, image_size * image_size])
        y = tf.placeholder(tf.float32, [None, n_labels])

        # Decay the learning rate by a factor of 0.90 every 100 steps.
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(start_learn_rate, global_step, 100, 0.90)

        # One hidden layer of 1024 ReLU units, then a linear readout into softmax.
        w1, b1 = create_w_b(image_size * image_size, 1024)
        # w2, b2 = create_w_b(512, 256)
        w3, b3 = create_w_b(1024, n_labels)

        h1 = tf.add(tf.matmul(x, w1), b1)
        a1 = tf.nn.relu(h1)
        # a1_dropped = tf.nn.dropout(a1, dropout1)
        # h2 = tf.add(tf.matmul(a1_dropped, w2), b2)
        # a2 = tf.nn.relu(h2)
        # a2_dropped = tf.nn.dropout(a2, dropout2)
        h3 = tf.add(tf.matmul(a1, w3), b3)
        y_ = tf.nn.softmax(h3)

        # Cross-entropy on the raw logits h3, plus L2 penalties on both weight matrices.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=h3, labels=y)) \
            + reg * (tf.nn.l2_loss(w1) + tf.nn.l2_loss(w3))
        # Use the decayed learning_rate here (not the constant start_learn_rate, which would
        # make the "Current learning rate" printout meaningless); passing global_step lets
        # minimize() increment it so the decay schedule advances.
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    num_steps = 3001
    batch_size = 128

    with tf.Session(graph=graph) as session:
        tf.initialize_all_variables().run()
        print "Initialized"
        for step in xrange(num_steps):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            feed_dict = {x: batch_xs, y: batch_ys}
            _, l, predictions = session.run([optimizer, loss, y_], feed_dict=feed_dict)
            if step % 500 == 0:
                print "Current learning rate:", learning_rate.eval()
                print "Minibatch loss at step", step, ":", l
                print "Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_ys)
                print "Validation accuracy: %.1f%%" % accuracy(y_.eval(feed_dict={x: mnist.validation.images}),
                                                               mnist.validation.labels)
        print "Test accuracy: %.1f%%" % accuracy(y_.eval(feed_dict={x: mnist.test.images}), mnist.test.labels)


if __name__ == "__main__":
    main()
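This script targets the Python 2 / TensorFlow 0.x APIs of early 2016; tf.placeholder, tf.Session, and the tensorflow.examples.tutorials MNIST loader were all removed in TensorFlow 2. Purely as a sketch under that assumption (a TF 2.x install with tf.keras available), the same architecture, one 1024-unit ReLU layer with L2-regularized weights and Adam on an exponentially decaying learning rate, could look like the following. Note two deliberate approximations: tf.keras.regularizers.l2 omits the 1/2 factor that tf.nn.l2_loss applies, and epochs=7 only roughly matches the original 3001 steps at batch size 128.

# Sketch only: the same model on the TensorFlow 2 / Keras API (assumes tf >= 2.x).
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Decay the learning rate by a factor of 0.90 every 100 steps, as in the graph above.
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.01, decay_steps=100, decay_rate=0.90)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(1024, activation="relu", input_shape=(784,),
                          kernel_regularizer=tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dense(10,
                          kernel_regularizer=tf.keras.regularizers.l2(0.001)),
])

model.compile(optimizer=tf.keras.optimizers.Adam(schedule),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=7)
model.evaluate(x_test, y_test)

The from_logits=True loss mirrors the original script, which feeds the raw logits h3 to softmax_cross_entropy_with_logits rather than the softmax output y_.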