CNN on Tensorflow MNIST data.
{
"cells": [
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.examples.tutorials.mnist import input_data"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
"Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
"Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
"Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
]
}
],
"source": [
"mnist = input_data.read_data_sets(\"MNIST_data/\",one_hot=True)"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# HELPER"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# INIT WEIGHTS\n",
"def init_weights(shape):\n",
" init_random_dist = tf.truncated_normal(shape, stddev=0.1)\n",
" return tf.Variable(init_random_dist)"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# INIT BIAS\n",
"def init_bias(shape):\n",
" init_bias_vals = tf.constant(0.1, shape=shape)\n",
" return tf.Variable(init_bias_vals)"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# CONV2D\n",
"def conv2d(x,W):\n",
" # x --> input layer [batch, H(height), W(width), Channels]\n",
" # W --> [filter H, filter W, Channels IN, Channels OUT]\n",
" \n",
" return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')"
]
},
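{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick shape check (illustrative sketch; the demo_* tensors are throwaway names):\n",
"# with 'SAME' padding and stride 1, conv2d keeps the spatial size, so a\n",
"# [1,28,28,1] input filtered by a [5,5,1,32] kernel yields [1,28,28,32]\n",
"demo_image = tf.zeros([1, 28, 28, 1])\n",
"demo_filter = tf.zeros([5, 5, 1, 32])\n",
"print(conv2d(demo_image, demo_filter).get_shape())  # (1, 28, 28, 32)"
]
},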
{
"cell_type": "code",
"execution_count": 55,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# POOLING\n",
"def max_pool_2by2(x):\n",
" # x --> [batch, h, w, c]\n",
" return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')"
]
},
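{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick shape check (illustrative sketch; demo_maps is a throwaway name):\n",
"# 2x2 max pooling with stride 2 halves height and width,\n",
"# so [1,28,28,32] becomes [1,14,14,32]\n",
"demo_maps = tf.zeros([1, 28, 28, 32])\n",
"print(max_pool_2by2(demo_maps).get_shape())  # (1, 14, 14, 32)"
]
},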
{
"cell_type": "code",
"execution_count": 56,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Convolutional layer\n",
"def convolutional_layer(input_x, shape):\n",
" W = init_weights(shape)\n",
" b = init_bias([shape[3]])\n",
" return tf.nn.relu(conv2d(input_x,W)+b)"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Normal fully connected layer\n",
"def normal_full_layer(input_layer, size):\n",
" input_size = int(input_layer.get_shape()[1])\n",
" W = init_weights([input_size, size])\n",
" b = init_bias([size])\n",
" return tf.matmul(input_layer, W) + b"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Placeholders\n",
"x = tf.placeholder(tf.float32, shape=[None,784]) # 784 because there are 784 pixels in each image\n",
"y_true = tf.placeholder(tf.float32, shape=[None,10]) # 10 because lables are one-hot encoded between 0-9"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Layer\n",
"x_image = tf.reshape(x,[-1,28,28,1]) # Converting flattended out image back to original grayscale image"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"convo_1 = convolutional_layer(x_image,shape=[5,5,1,32])\n",
"convo_1_pooling = max_pool_2by2(convo_1)"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [],
"source": [
"convo_2 = convolutional_layer(convo_1, shape=[5,5,32,64])\n",
"convo_2_pooling = max_pool_2by2(convo_2)"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"convo_2_flat = tf.reshape(convo_2_pooling,[-1,7*7*64])\n",
"full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat,1024))"
]
},
{
"cell_type": "code",
"execution_count": 63,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Dropout\n",
"hold_prob = tf.placeholder(tf.float32)\n",
"full_one_dropout = tf.nn.dropout(full_layer_one, keep_prob=hold_prob)"
]
},
{
"cell_type": "code",
"execution_count": 64,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"y_pred = normal_full_layer(full_one_dropout,10)"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Loss function\n",
"cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Optimizer \n",
"optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)\n",
"train = optimizer.minimize(cross_entropy)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"init = tf.global_variables_initializer()"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"ename": "InvalidArgumentError",
"evalue": "logits and labels must be same size: logits_size=[200,10] labels_size=[50,10]\n\t [[Node: SoftmaxCrossEntropyWithLogits_2 = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"](Reshape_13, Reshape_14)]]\n\nCaused by op 'SoftmaxCrossEntropyWithLogits_2', defined at:\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel_launcher.py\", line 16, in <module>\n app.launch_new_instance()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelapp.py\", line 477, in start\n ioloop.IOLoop.instance().start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/ioloop.py\", line 177, in start\n super(ZMQIOLoop, self).start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/ioloop.py\", line 888, in start\n handler_func(fd_obj, events)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/stack_context.py\", line 277, in null_wrapper\n return fn(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 440, in _handle_events\n self._handle_recv()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 472, in _handle_recv\n self._run_callback(callback, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 414, in _run_callback\n callback(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/stack_context.py\", line 277, in null_wrapper\n return fn(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 283, in dispatcher\n return self.dispatch_shell(stream, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 235, in dispatch_shell\n handler(stream, idents, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 399, in execute_request\n user_expressions, allow_stdin)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/ipkernel.py\", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/zmqshell.py\", line 533, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2698, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2802, in run_ast_nodes\n if self.run_code(code, result):\n File 
\"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2862, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"<ipython-input-65-ebc1ce0b66fd>\", line 2, in <module>\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py\", line 1597, in softmax_cross_entropy_with_logits\n precise_logits, labels, name=name)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/ops/gen_nn_ops.py\", line 2385, in _softmax_cross_entropy_with_logits\n features=features, labels=labels, name=name)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py\", line 767, in apply_op\n op_def=op_def)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 2630, in create_op\n original_op=self._default_original_op, op_def=op_def)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 1204, in __init__\n self._traceback = self._graph._extract_stack() # pylint: disable=protected-access\n\nInvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[200,10] labels_size=[50,10]\n\t [[Node: SoftmaxCrossEntropyWithLogits_2 = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"](Reshape_13, Reshape_14)]]\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1327\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1328\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1305\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1306\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1307\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/contextlib.py\u001b[0m in \u001b[0;36m__exit__\u001b[0;34m(self, type, value, traceback)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py\u001b[0m in \u001b[0;36mraise_exception_on_not_ok_status\u001b[0;34m()\u001b[0m\n\u001b[1;32m 465\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpywrap_tensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 466\u001b[0;31m pywrap_tensorflow.TF_GetCode(status))\n\u001b[0m\u001b[1;32m 467\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m: logits and labels must be same size: logits_size=[200,10] labels_size=[50,10]\n\t [[Node: SoftmaxCrossEntropyWithLogits_2 = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"](Reshape_13, Reshape_14)]]",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-68-53c2650b9d08>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mbatch_x\u001b[0m \u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmnist\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnext_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m50\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0my_true\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mhold_prob\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m0.5\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# PRINT OUT A MESSAGE EVERY 100 STEPS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 895\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1122\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1123\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1124\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1125\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1319\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1320\u001b[0m return self._do_call(_run_fn, self._session, feeds, fetches, targets,\n\u001b[0;32m-> 1321\u001b[0;31m options, run_metadata)\n\u001b[0m\u001b[1;32m 1322\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1323\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1338\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1339\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1340\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1341\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1342\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m: logits and labels must be same size: logits_size=[200,10] labels_size=[50,10]\n\t [[Node: SoftmaxCrossEntropyWithLogits_2 = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"](Reshape_13, Reshape_14)]]\n\nCaused by op 'SoftmaxCrossEntropyWithLogits_2', defined at:\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel_launcher.py\", line 16, in <module>\n app.launch_new_instance()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelapp.py\", line 477, in start\n ioloop.IOLoop.instance().start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/ioloop.py\", line 177, in start\n super(ZMQIOLoop, self).start()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/ioloop.py\", line 888, in start\n handler_func(fd_obj, events)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/stack_context.py\", line 277, in null_wrapper\n return fn(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 440, in _handle_events\n self._handle_recv()\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 472, in _handle_recv\n self._run_callback(callback, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 414, in _run_callback\n callback(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tornado/stack_context.py\", line 277, in null_wrapper\n return fn(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 283, in dispatcher\n return self.dispatch_shell(stream, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 235, in dispatch_shell\n handler(stream, idents, msg)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 399, in execute_request\n user_expressions, allow_stdin)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/ipkernel.py\", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/ipykernel/zmqshell.py\", line 533, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2698, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2802, in run_ast_nodes\n if self.run_code(code, result):\n File 
\"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2862, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"<ipython-input-65-ebc1ce0b66fd>\", line 2, in <module>\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py\", line 1597, in softmax_cross_entropy_with_logits\n precise_logits, labels, name=name)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/ops/gen_nn_ops.py\", line 2385, in _softmax_cross_entropy_with_logits\n features=features, labels=labels, name=name)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py\", line 767, in apply_op\n op_def=op_def)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 2630, in create_op\n original_op=self._default_original_op, op_def=op_def)\n File \"/Users/majain/anaconda3/envs/tfdeeplearning/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 1204, in __init__\n self._traceback = self._graph._extract_stack() # pylint: disable=protected-access\n\nInvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[200,10] labels_size=[50,10]\n\t [[Node: SoftmaxCrossEntropyWithLogits_2 = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"](Reshape_13, Reshape_14)]]\n"
]
}
],
"source": [
"steps = 5000\n",
"\n",
"with tf.Session() as sess:\n",
" \n",
" sess.run(init)\n",
" \n",
" for i in range(steps):\n",
" \n",
" batch_x , batch_y = mnist.train.next_batch(50)\n",
" \n",
" sess.run(train,feed_dict={x:batch_x,y_true:batch_y,hold_prob:0.5})\n",
" \n",
" # PRINT OUT A MESSAGE EVERY 100 STEPS\n",
" if i%100 == 0:\n",
" \n",
" print('Currently on step {}'.format(i))\n",
" print('Accuracy is:')\n",
" # Test the Train Model\n",
" matches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1))\n",
"\n",
" acc = tf.reduce_mean(tf.cast(matches,tf.float32))\n",
"\n",
" print(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels,hold_prob:1.0}))\n",
" print('\\n')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}