Last active
February 21, 2017 21:20
-
-
Save MInner/140c07e9028f5474fce98085bf4bef59 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 11, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Extracting /tmp/mnist/train-images-idx3-ubyte.gz\n", | |
"Extracting /tmp/mnist/train-labels-idx1-ubyte.gz\n", | |
"Extracting /tmp/mnist/t10k-images-idx3-ubyte.gz\n", | |
"Extracting /tmp/mnist/t10k-labels-idx1-ubyte.gz\n", | |
"iteration 0\t accuracy: 0.071\n", | |
"iteration 1000\t accuracy: 0.814\n", | |
"iteration 2000\t accuracy: 0.851\n", | |
"iteration 3000\t accuracy: 0.864\n", | |
"iteration 4000\t accuracy: 0.873\n", | |
"iteration 5000\t accuracy: 0.876\n", | |
"iteration 6000\t accuracy: 0.879\n", | |
"iteration 7000\t accuracy: 0.884\n", | |
"iteration 8000\t accuracy: 0.888\n", | |
"iteration 9000\t accuracy: 0.890\n", | |
"iteration 10000\t accuracy: 0.891\n", | |
"iteration 11000\t accuracy: 0.893\n", | |
"iteration 12000\t accuracy: 0.894\n", | |
"iteration 13000\t accuracy: 0.897\n", | |
"iteration 14000\t accuracy: 0.897\n", | |
"iteration 15000\t accuracy: 0.899\n", | |
"iteration 16000\t accuracy: 0.898\n", | |
"iteration 17000\t accuracy: 0.899\n", | |
"iteration 18000\t accuracy: 0.901\n", | |
"iteration 19000\t accuracy: 0.901\n" | |
] | |
} | |
], | |
"source": [ | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets


def main():
    """Train a single-layer softmax classifier on MNIST and report test accuracy.

    Downloads/extracts MNIST into /tmp/mnist/ (network I/O on first run),
    builds a linear model ``y = xW + b`` trained with softmax cross-entropy
    via SGD, and prints test-set accuracy every 1000 iterations.
    """
    # Import data (one-hot labels so they match the 10-way softmax output).
    mnist = read_data_sets('/tmp/mnist/', one_hot=True)

    with tf.Graph().as_default() as g:
        # Pin all graph construction to the CPU device.
        with tf.device("/cpu:0"):

            # Input placeholders: substituted with real batches at
            # train/test time via feed_dict. None = variable batch size.
            x_ = tf.placeholder(tf.float32, [None, 784])
            y_ = tf.placeholder(tf.float32, [None, 10])

            # Linear model: logits = xW + b, with randomly initialized weights.
            scope_args = {'initializer': tf.random_normal_initializer()}
            with tf.variable_scope("weights", **scope_args):
                W = tf.get_variable('W', shape=[784, 10])
                b = tf.get_variable('b', shape=[10])
                y_logits = tf.matmul(x_, W) + b

            # Naive loss:
            #   > losses = y_ * tf.log(tf.nn.softmax(y_logits))
            #   > tf.reduce_mean(-tf.reduce_sum(losses, 1))
            # can be numerically unstable, so we use the fused
            # softmax_cross_entropy_with_logits on the raw logits and
            # average across the batch.
            losses = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_logits)
            cross_entropy_loss = tf.reduce_mean(losses)
            train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy_loss)

            # Softmax is monotonic, so argmax over the raw logits gives the
            # same prediction as argmax over softmax probabilities — no need
            # to apply softmax here. Use `axis=` (the `dimension=` keyword is
            # deprecated in TF 1.x).
            y_pred = tf.argmax(y_logits, axis=1)
            correct_prediction = tf.equal(y_pred, tf.argmax(y_, axis=1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with g.as_default(), tf.Session() as sess:
        # This is how we "execute" ops (which return None, e.g. init or
        # train_step) or evaluate parts of the graph defined above (loss,
        # accuracy, etc.) given concrete inputs for (x_, y_).
        # Note: initialize_all_variables() is deprecated (removed in later
        # TF 1.x releases) in favor of global_variables_initializer().
        sess.run(tf.global_variables_initializer())

        # Train with mini-batch SGD.
        for iter_i in range(20000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x_: batch_xs, y_: batch_ys})

            # Periodically evaluate the trained model on the full test set.
            if iter_i % 1000 == 0:
                tf_feed_dict = {x_: mnist.test.images, y_: mnist.test.labels}
                acc_value = sess.run(accuracy, feed_dict=tf_feed_dict)
                print('iteration %d\t accuracy: %.3f' % (iter_i, acc_value))


main()
] | |
} | |
], | |
"metadata": { | |
"anaconda-cloud": {}, | |
"kernelspec": { | |
"display_name": "Python [conda env:tf11]", | |
"language": "python", | |
"name": "conda-env-tf11-py" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 3 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython3", | |
"version": "3.5.2" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 1 | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment