Last active
September 12, 2017 06:13
-
-
Save mananpal1997/d9c5c9397d0baee30fd4e664de1e690b to your computer and use it in GitHub Desktop.
DenseNet for MNIST data
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 1, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"import numpy as np\n", | |
"import tensorflow as tf\n", | |
"from tensorflow.examples.tutorials.mnist import input_data\n", | |
"from tensorflow.contrib.framework import arg_scope\n", | |
"from tensorflow.contrib.layers import batch_norm, flatten" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 2, | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Extracting MNIST_DATA/train-images-idx3-ubyte.gz\n", | |
"Extracting MNIST_DATA/train-labels-idx1-ubyte.gz\n", | |
"Extracting MNIST_DATA/t10k-images-idx3-ubyte.gz\n", | |
"Extracting MNIST_DATA/t10k-labels-idx1-ubyte.gz\n" | |
] | |
} | |
], | |
"source": [ | |
# Download (if absent) and load MNIST into ./MNIST_DATA with one-hot labels.
mnist = input_data.read_data_sets('MNIST_DATA', one_hot=True)
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 3, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
growth_rate = 12           # DenseNet growth rate k: feature maps each bottleneck adds
num_blocks = 2             # number of dense block + transition layer pairs
init_learning_rate = 1e-1  # SGD learning rate at epoch 0 (decayed during training)
dropout_rate = 0.2         # dropout applied after every conv in a bottleneck

momentum = 0.9             # Nesterov momentum coefficient
weight_decay = 1e-4        # L2 regularization strength added to the loss

num_classes = 10           # MNIST digit classes
batch_size = 100
epochs = 50
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 4, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
def BN(x, training, scope):
    # Batch normalization whose behaviour is selected by the `training`
    # placeholder via tf.cond at graph-build time.
    # NOTE(review): both branches construct batch_norm under the same scope;
    # the training branch creates the variables (reuse=None) and updates the
    # moving statistics in place (updates_collections=None), while the
    # inference branch reuses those variables (reuse=True).
    with arg_scope(
        [batch_norm],
        scope=scope,
        updates_collections=None,
        decay=0.9,
        center=True,
        scale=True,
        zero_debias_moving_mean=True
    ):
        return tf.cond(
            training,
            lambda: batch_norm(inputs=x, is_training=training, reuse=None),
            lambda: batch_norm(inputs=x, is_training=training, reuse=True)
        )
"\n", | |
def ReLU(x):
    """Element-wise rectified linear activation."""
    return tf.nn.relu(x)
"\n", | |
def Conv(input, filter, kernel, stride=1, layer_name='conv'):
    # 2-D convolution with SAME padding and no activation (linear output).
    # NOTE(review): `input` and `filter` shadow Python builtins, but callers
    # pass them by keyword (filter=..., layer_name=...), so renaming the
    # parameters would break the call sites; keeping the names as-is.
    with tf.name_scope(layer_name):
        network = tf.layers.conv2d(
            inputs=input,
            filters=filter,      # number of output channels
            kernel_size=kernel,
            strides=stride,
            padding='SAME'
        )
        return network
"\n", | |
def Dropout(x, rate, training):
    """Dropout that is active only while `training` evaluates to true."""
    return tf.layers.dropout(inputs=x, rate=rate, training=training)
"\n", | |
def Avg_Pool(x, pool_size=[2, 2], stride=2, padding='VALID'):
    """2x2, stride-2 average pooling used by the transition layers."""
    return tf.layers.average_pooling2d(
        inputs=x,
        pool_size=pool_size,
        strides=stride,
        padding=padding
    )
"\n", | |
def Global_Avg_Pool(x, stride=1):
    # Average-pool over the entire spatial extent, yielding a 1x1 feature map.
    # NOTE(review): np.shape(x)[1:3] is evaluated at graph-build time and
    # assumes x has a fully defined static (N, H, W, C) shape — confirm if
    # variable-size inputs are ever fed.
    pool_size = np.shape(x)[1:3]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
"\n", | |
def Max_Pool(x, pool_size=[3, 3], stride=2, padding='VALID'):
    """3x3, stride-2 max pooling (applied once after the stem convolution)."""
    return tf.layers.max_pooling2d(
        inputs=x,
        pool_size=pool_size,
        strides=stride,
        padding=padding
    )
"\n", | |
# Final fully connected classifier head; relies on the module-level
# `num_classes` constant for its output width.
def Linear(x): return tf.layers.dense(inputs=x, units=num_classes, name='linear')
"\n", | |
def Concatenation(layers):
    """Concatenate a list of feature maps along the channel axis (NHWC)."""
    return tf.concat(layers, axis=3)
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 5, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
class DenseNet(object):
    """DenseNet-BC style classifier graph.

    Builds: stem conv + max-pool, `num_blocks` pairs of (dense block,
    transition layer), a final dense block, then BN-ReLU-global-pool and a
    linear classifier head. The finished logits tensor is exposed as
    `self.model`.

    Args:
        x: input image batch, NHWC float tensor.
        num_blocks: number of dense block + transition pairs before the
            final dense block.
        filters: growth rate k (channels each bottleneck contributes).
        training: boolean placeholder toggling BN statistics and dropout.
    """

    def __init__(self, x, num_blocks, filters, training):
        self.num_blocks = num_blocks
        self.filters = filters
        self.training = training

        self.model = self.create_model(x)

    def create_model(self, x):
        # Stem: wide 7x7 stride-2 conv followed by 3x3 stride-2 max-pool.
        x = Conv(x, filter=2 * self.filters, kernel=[7, 7], stride=2, layer_name='conv0')
        x = Max_Pool(x)

        for i in range(self.num_blocks):
            x = self.dense_block(x, num_layers=4, layer_name='dense_%d' % i)
            x = self.transition(x, scope='trans_%d' % i)

        x = self.dense_block(x, num_layers=32, layer_name='dense_final')

        # Classifier head: BN-ReLU, global average pool, linear logits.
        x = BN(x, training=self.training, scope='linear_batch')
        x = ReLU(x)
        x = Global_Avg_Pool(x)
        x = flatten(x)
        x = Linear(x)

        return x

    def bottleneck(self, x, scope):
        # BN-ReLU-Conv(1x1, 4k) -> BN-ReLU-Conv(3x3, k), dropout after each
        # conv; the 1x1 stage bottlenecks the channel count before the 3x3.
        with tf.name_scope(scope):
            x = BN(x, training=self.training, scope=scope + '_batch1')
            x = ReLU(x)
            x = Conv(x, filter=4 * self.filters, kernel=[1, 1], layer_name=scope + '_conv1')
            x = Dropout(x, rate=dropout_rate, training=self.training)

            x = BN(x, training=self.training, scope=scope + '_batch2')
            x = ReLU(x)
            x = Conv(x, filter=self.filters, kernel=[3, 3], layer_name=scope + '_conv2')
            x = Dropout(x, rate=dropout_rate, training=self.training)

            return x

    def transition(self, x, scope):
        # Compress channels with a 1x1 conv and halve the spatial size with
        # 2x2 average pooling between dense blocks.
        with tf.name_scope(scope):
            x = BN(x, training=self.training, scope=scope + '_batch1')
            x = ReLU(x)
            x = Conv(x, filter=self.filters, kernel=[1, 1], layer_name=scope + '_conv1')
            x = Dropout(x, rate=dropout_rate, training=self.training)
            x = Avg_Pool(x)

            return x

    def dense_block(self, x, num_layers, layer_name):
        # Each bottleneck sees the concatenation of the block input and all
        # previous bottleneck outputs (dense connectivity).
        with tf.name_scope(layer_name):
            concat_layers = [x]

            bc = self.bottleneck(x, scope=layer_name + '_bottleneck_0')
            concat_layers.append(bc)

            for i in range(num_layers - 1):
                y = Concatenation(concat_layers)
                bc = self.bottleneck(y, scope=layer_name + '_bottleneck_%d' % (i + 1))
                concat_layers.append(bc)

            # BUG FIX: the block's output must be the concatenation of its
            # input and every layer's output (the defining DenseNet feature);
            # the original returned only the last bottleneck's k channels,
            # severing the dense connections at each block boundary.
            return Concatenation(concat_layers)
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 6, | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Step: 0, Loss: 2.474635, Training Accuracy: 0.140000\n", | |
"Step: 100, Loss: 1.928257, Training Accuracy: 0.240000\n", | |
"Step: 200, Loss: 1.796032, Training Accuracy: 0.250000\n", | |
"Step: 300, Loss: 1.443111, Training Accuracy: 0.430000\n", | |
"Step: 400, Loss: 1.202338, Training Accuracy: 0.380000\n", | |
"Step: 500, Loss: 0.878169, Training Accuracy: 0.500000\n", | |
"Epoch: 1, Accuracy: 0.614000\n", | |
"Step: 0, Loss: 1.018301, Training Accuracy: 0.630000\n", | |
"Step: 100, Loss: 0.894778, Training Accuracy: 0.630000\n", | |
"Step: 200, Loss: 0.652446, Training Accuracy: 0.730000\n", | |
"Step: 300, Loss: 0.518871, Training Accuracy: 0.860000\n", | |
"Step: 400, Loss: 0.453392, Training Accuracy: 0.900000\n", | |
"Step: 500, Loss: 0.496037, Training Accuracy: 0.810000\n", | |
"Epoch: 2, Accuracy: 0.933700\n", | |
"Step: 0, Loss: 0.467244, Training Accuracy: 0.900000\n", | |
"Step: 100, Loss: 0.326253, Training Accuracy: 0.930000\n", | |
"Step: 200, Loss: 0.542360, Training Accuracy: 0.900000\n", | |
"Step: 300, Loss: 0.272812, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.597115, Training Accuracy: 0.890000\n", | |
"Step: 500, Loss: 0.134161, Training Accuracy: 0.980000\n", | |
"Epoch: 3, Accuracy: 0.965300\n", | |
"Step: 0, Loss: 0.260523, Training Accuracy: 0.920000\n", | |
"Step: 100, Loss: 0.250051, Training Accuracy: 0.940000\n", | |
"Step: 200, Loss: 0.192698, Training Accuracy: 0.960000\n", | |
"Step: 300, Loss: 0.184986, Training Accuracy: 0.950000\n", | |
"Step: 400, Loss: 0.095427, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.155943, Training Accuracy: 0.970000\n", | |
"Epoch: 4, Accuracy: 0.934900\n", | |
"Step: 0, Loss: 0.370246, Training Accuracy: 0.910000\n", | |
"Step: 100, Loss: 0.178250, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.245562, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.167641, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.313479, Training Accuracy: 0.910000\n", | |
"Step: 500, Loss: 0.259531, Training Accuracy: 0.950000\n", | |
"Epoch: 5, Accuracy: 0.978100\n", | |
"Step: 0, Loss: 0.121623, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.189817, Training Accuracy: 0.940000\n", | |
"Step: 200, Loss: 0.111885, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.185560, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.242126, Training Accuracy: 0.970000\n", | |
"Step: 500, Loss: 0.197734, Training Accuracy: 0.970000\n", | |
"Epoch: 6, Accuracy: 0.977800\n", | |
"Step: 0, Loss: 0.130177, Training Accuracy: 0.950000\n", | |
"Step: 100, Loss: 0.346705, Training Accuracy: 0.930000\n", | |
"Step: 200, Loss: 0.204020, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.067746, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.131821, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.154294, Training Accuracy: 0.980000\n", | |
"Epoch: 7, Accuracy: 0.964700\n", | |
"Step: 0, Loss: 0.202179, Training Accuracy: 0.940000\n", | |
"Step: 100, Loss: 0.165310, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.112848, Training Accuracy: 0.960000\n", | |
"Step: 300, Loss: 0.339371, Training Accuracy: 0.920000\n", | |
"Step: 400, Loss: 0.178441, Training Accuracy: 0.950000\n", | |
"Step: 500, Loss: 0.099440, Training Accuracy: 0.960000\n", | |
"Epoch: 8, Accuracy: 0.976600\n", | |
"Step: 0, Loss: 0.144849, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.073945, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.047248, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.294962, Training Accuracy: 0.950000\n", | |
"Step: 400, Loss: 0.077182, Training Accuracy: 0.960000\n", | |
"Step: 500, Loss: 0.235688, Training Accuracy: 0.950000\n", | |
"Epoch: 9, Accuracy: 0.981300\n", | |
"Step: 0, Loss: 0.267049, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.168234, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.246668, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.127459, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.076825, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.078841, Training Accuracy: 0.990000\n", | |
"Epoch: 10, Accuracy: 0.976600\n", | |
"Step: 0, Loss: 0.308683, Training Accuracy: 0.950000\n", | |
"Step: 100, Loss: 0.029498, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.172844, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.237482, Training Accuracy: 0.940000\n", | |
"Step: 400, Loss: 0.074228, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.179363, Training Accuracy: 0.970000\n", | |
"Epoch: 11, Accuracy: 0.986300\n", | |
"Step: 0, Loss: 0.047526, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.073546, Training Accuracy: 0.940000\n", | |
"Step: 200, Loss: 0.134109, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.066138, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.363917, Training Accuracy: 0.950000\n", | |
"Step: 500, Loss: 0.026165, Training Accuracy: 0.970000\n", | |
"Epoch: 12, Accuracy: 0.984500\n", | |
"Step: 0, Loss: 0.075272, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.159148, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.020943, Training Accuracy: 1.000000\n", | |
"Step: 300, Loss: 0.074014, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.038982, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.222453, Training Accuracy: 0.970000\n", | |
"Epoch: 13, Accuracy: 0.986200\n", | |
"Step: 0, Loss: 0.120779, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.073912, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.121782, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.109796, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.167202, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.080977, Training Accuracy: 0.960000\n", | |
"Epoch: 14, Accuracy: 0.981300\n", | |
"Step: 0, Loss: 0.041737, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.071293, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.338323, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.087923, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.135420, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.049542, Training Accuracy: 0.970000\n", | |
"Epoch: 15, Accuracy: 0.988200\n", | |
"Step: 0, Loss: 0.019581, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.062950, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.132158, Training Accuracy: 0.940000\n", | |
"Step: 300, Loss: 0.095271, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.126760, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.124714, Training Accuracy: 0.960000\n", | |
"Epoch: 16, Accuracy: 0.985800\n", | |
"Step: 0, Loss: 0.108088, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.079615, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.017252, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.158327, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.063406, Training Accuracy: 0.970000\n", | |
"Step: 500, Loss: 0.076562, Training Accuracy: 0.990000\n", | |
"Epoch: 17, Accuracy: 0.981700\n", | |
"Step: 0, Loss: 0.182426, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.189436, Training Accuracy: 0.950000\n", | |
"Step: 200, Loss: 0.049460, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.068724, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.082657, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.058701, Training Accuracy: 0.990000\n", | |
"Epoch: 18, Accuracy: 0.989900\n", | |
"Step: 0, Loss: 0.150377, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.080041, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.174258, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.024637, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.088409, Training Accuracy: 0.950000\n", | |
"Step: 500, Loss: 0.346973, Training Accuracy: 0.980000\n", | |
"Epoch: 19, Accuracy: 0.988500\n", | |
"Step: 0, Loss: 0.098109, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.139357, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.188815, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.253866, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.029966, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.045387, Training Accuracy: 0.980000\n", | |
"Epoch: 20, Accuracy: 0.986400\n", | |
"Step: 0, Loss: 0.097561, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.206168, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.106156, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.120006, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.185097, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.061646, Training Accuracy: 0.980000\n", | |
"Epoch: 21, Accuracy: 0.978800\n", | |
"Step: 0, Loss: 0.087545, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.075260, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.089630, Training Accuracy: 0.950000\n", | |
"Step: 300, Loss: 0.031041, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.030030, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.034553, Training Accuracy: 0.970000\n", | |
"Epoch: 22, Accuracy: 0.984600\n", | |
"Step: 0, Loss: 0.075973, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.079911, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.072271, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.092702, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.039479, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.231029, Training Accuracy: 0.990000\n" | |
] | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Epoch: 23, Accuracy: 0.989000\n", | |
"Step: 0, Loss: 0.041267, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.145692, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.123330, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.053501, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.039418, Training Accuracy: 0.970000\n", | |
"Step: 500, Loss: 0.083621, Training Accuracy: 0.980000\n", | |
"Epoch: 24, Accuracy: 0.985700\n", | |
"Step: 0, Loss: 0.044242, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.377823, Training Accuracy: 0.950000\n", | |
"Step: 200, Loss: 0.113801, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.069798, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.091559, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.066623, Training Accuracy: 0.990000\n", | |
"Epoch: 25, Accuracy: 0.985000\n", | |
"Step: 0, Loss: 0.124635, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.065215, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.147563, Training Accuracy: 0.960000\n", | |
"Step: 300, Loss: 0.206968, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.173499, Training Accuracy: 0.950000\n", | |
"Step: 500, Loss: 0.046948, Training Accuracy: 0.990000\n", | |
"Epoch: 26, Accuracy: 0.985200\n", | |
"Step: 0, Loss: 0.070764, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.320213, Training Accuracy: 0.960000\n", | |
"Step: 200, Loss: 0.036378, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.113923, Training Accuracy: 0.930000\n", | |
"Step: 400, Loss: 0.058063, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.046642, Training Accuracy: 0.980000\n", | |
"Epoch: 27, Accuracy: 0.985300\n", | |
"Step: 0, Loss: 0.219152, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.114688, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.086950, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.038894, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.039790, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.041944, Training Accuracy: 1.000000\n", | |
"Epoch: 28, Accuracy: 0.985000\n", | |
"Step: 0, Loss: 0.047084, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.099738, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.143748, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.115018, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.069370, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.130804, Training Accuracy: 0.990000\n", | |
"Epoch: 29, Accuracy: 0.984400\n", | |
"Step: 0, Loss: 0.014005, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.079484, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.090689, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.036697, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.031776, Training Accuracy: 1.000000\n", | |
"Step: 500, Loss: 0.204861, Training Accuracy: 0.950000\n", | |
"Epoch: 30, Accuracy: 0.985700\n", | |
"Step: 0, Loss: 0.061917, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.118006, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.063104, Training Accuracy: 0.960000\n", | |
"Step: 300, Loss: 0.050319, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.095023, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.016757, Training Accuracy: 0.970000\n", | |
"Epoch: 31, Accuracy: 0.985300\n", | |
"Step: 0, Loss: 0.037285, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.234596, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.130324, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.054412, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.079657, Training Accuracy: 0.960000\n", | |
"Step: 500, Loss: 0.158014, Training Accuracy: 0.980000\n", | |
"Epoch: 32, Accuracy: 0.983000\n", | |
"Step: 0, Loss: 0.128567, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.143610, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.061628, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.104567, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.116469, Training Accuracy: 0.960000\n", | |
"Step: 500, Loss: 0.109535, Training Accuracy: 0.980000\n", | |
"Epoch: 33, Accuracy: 0.984400\n", | |
"Step: 0, Loss: 0.025232, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.105486, Training Accuracy: 0.950000\n", | |
"Step: 200, Loss: 0.130121, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.161160, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.063755, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.045842, Training Accuracy: 0.970000\n", | |
"Epoch: 34, Accuracy: 0.984700\n", | |
"Step: 0, Loss: 0.164121, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.059262, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.053989, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.133931, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.078000, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.022494, Training Accuracy: 0.980000\n", | |
"Epoch: 35, Accuracy: 0.984900\n", | |
"Step: 0, Loss: 0.110658, Training Accuracy: 0.950000\n", | |
"Step: 100, Loss: 0.050623, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.039574, Training Accuracy: 1.000000\n", | |
"Step: 300, Loss: 0.172854, Training Accuracy: 0.940000\n", | |
"Step: 400, Loss: 0.144027, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.165402, Training Accuracy: 0.990000\n", | |
"Epoch: 36, Accuracy: 0.984000\n", | |
"Step: 0, Loss: 0.197902, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.112415, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.037926, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.152478, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.120318, Training Accuracy: 0.950000\n", | |
"Step: 500, Loss: 0.157562, Training Accuracy: 0.960000\n", | |
"Epoch: 37, Accuracy: 0.985200\n", | |
"Step: 0, Loss: 0.200034, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.050124, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.080190, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.059310, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.158290, Training Accuracy: 1.000000\n", | |
"Step: 500, Loss: 0.086090, Training Accuracy: 0.980000\n", | |
"Epoch: 38, Accuracy: 0.984000\n", | |
"Step: 0, Loss: 0.047151, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.028331, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.056709, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.012525, Training Accuracy: 1.000000\n", | |
"Step: 400, Loss: 0.083613, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.032334, Training Accuracy: 1.000000\n", | |
"Epoch: 39, Accuracy: 0.984400\n", | |
"Step: 0, Loss: 0.028644, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.034539, Training Accuracy: 0.990000\n", | |
"Step: 200, Loss: 0.068945, Training Accuracy: 0.980000\n", | |
"Step: 300, Loss: 0.101828, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.087825, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.106344, Training Accuracy: 0.970000\n", | |
"Epoch: 40, Accuracy: 0.985300\n", | |
"Step: 0, Loss: 0.102353, Training Accuracy: 0.990000\n", | |
"Step: 100, Loss: 0.040507, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.089353, Training Accuracy: 1.000000\n", | |
"Step: 300, Loss: 0.086738, Training Accuracy: 0.960000\n", | |
"Step: 400, Loss: 0.082074, Training Accuracy: 0.960000\n", | |
"Step: 500, Loss: 0.029739, Training Accuracy: 0.980000\n", | |
"Epoch: 41, Accuracy: 0.985000\n", | |
"Step: 0, Loss: 0.135109, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.057333, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.050515, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.194938, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.252238, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.021746, Training Accuracy: 1.000000\n", | |
"Epoch: 42, Accuracy: 0.984700\n", | |
"Step: 0, Loss: 0.139070, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.095446, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.131267, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.101823, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.068933, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.019921, Training Accuracy: 1.000000\n", | |
"Epoch: 43, Accuracy: 0.985500\n", | |
"Step: 0, Loss: 0.108835, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.218925, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.042999, Training Accuracy: 1.000000\n", | |
"Step: 300, Loss: 0.075021, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.069624, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.027849, Training Accuracy: 0.980000\n", | |
"Epoch: 44, Accuracy: 0.984800\n", | |
"Step: 0, Loss: 0.028700, Training Accuracy: 0.980000\n", | |
"Step: 100, Loss: 0.027716, Training Accuracy: 1.000000\n", | |
"Step: 200, Loss: 0.285039, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.030663, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.077391, Training Accuracy: 0.970000\n", | |
"Step: 500, Loss: 0.037296, Training Accuracy: 0.990000\n", | |
"Epoch: 45, Accuracy: 0.984700\n", | |
"Step: 0, Loss: 0.032266, Training Accuracy: 0.970000\n", | |
"Step: 100, Loss: 0.260605, Training Accuracy: 0.950000\n", | |
"Step: 200, Loss: 0.082235, Training Accuracy: 1.000000\n", | |
"Step: 300, Loss: 0.132639, Training Accuracy: 0.950000\n", | |
"Step: 400, Loss: 0.027906, Training Accuracy: 0.970000\n", | |
"Step: 500, Loss: 0.049078, Training Accuracy: 0.970000\n" | |
] | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Epoch: 46, Accuracy: 0.984700\n", | |
"Step: 0, Loss: 0.152602, Training Accuracy: 0.950000\n", | |
"Step: 100, Loss: 0.042793, Training Accuracy: 1.000000\n", | |
"Step: 200, Loss: 0.156636, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.082997, Training Accuracy: 0.980000\n", | |
"Step: 400, Loss: 0.025179, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.054860, Training Accuracy: 0.990000\n", | |
"Epoch: 47, Accuracy: 0.984200\n", | |
"Step: 0, Loss: 0.014387, Training Accuracy: 1.000000\n", | |
"Step: 100, Loss: 0.133252, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.034201, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.020865, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.066465, Training Accuracy: 0.990000\n", | |
"Step: 500, Loss: 0.073531, Training Accuracy: 0.990000\n", | |
"Epoch: 48, Accuracy: 0.984500\n", | |
"Step: 0, Loss: 0.067314, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.012266, Training Accuracy: 0.980000\n", | |
"Step: 200, Loss: 0.034903, Training Accuracy: 0.990000\n", | |
"Step: 300, Loss: 0.014121, Training Accuracy: 0.990000\n", | |
"Step: 400, Loss: 0.057616, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.059816, Training Accuracy: 0.990000\n", | |
"Epoch: 49, Accuracy: 0.984400\n", | |
"Step: 0, Loss: 0.244725, Training Accuracy: 0.960000\n", | |
"Step: 100, Loss: 0.054189, Training Accuracy: 0.970000\n", | |
"Step: 200, Loss: 0.188488, Training Accuracy: 0.970000\n", | |
"Step: 300, Loss: 0.064585, Training Accuracy: 0.970000\n", | |
"Step: 400, Loss: 0.146086, Training Accuracy: 0.980000\n", | |
"Step: 500, Loss: 0.231101, Training Accuracy: 0.940000\n", | |
"Epoch: 50, Accuracy: 0.984400\n" | |
] | |
} | |
], | |
"source": [ | |
x = tf.placeholder(tf.float32, shape=[None, 784])
label = tf.placeholder(tf.float32, shape=[None, 10])
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')

# Flat MNIST vectors -> NHWC image batch.
batch_images = tf.reshape(x, [-1, 28, 28, 1])

logits = DenseNet(x=batch_images, num_blocks=num_blocks, filters=growth_rate, training=training_flag).model

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))

# L2 weight decay over all trainable variables, folded into the loss.
l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
train = optimizer.minimize(cost + l2_loss * weight_decay)

prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))

tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)

saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    # Resume from the latest checkpoint if one exists, else init from scratch.
    ckpt = tf.train.get_checkpoint_state('./model')

    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    merged = tf.summary.merge_all()

    epoch_learning_rate = init_learning_rate

    for epoch in range(epochs):
        # Step-decay schedule: divide the LR by 10 at 50% and 75% of training.
        # BUG FIX: the original used floor division (//= 10), which on the
        # float 0.1 yields 0.0 at epoch 25 and silently halts learning — the
        # logged test accuracy freezing at ~0.985 from epoch ~25 onward is
        # exactly this bug.
        if epoch in [epochs // 2, (epochs * 3) // 4]:
            epoch_learning_rate /= 10.0

        num_batch = mnist.train.num_examples // batch_size

        for batch in range(num_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            train_feed_dict = {
                x: batch_x,
                label: batch_y,
                learning_rate: epoch_learning_rate,
                training_flag: True
            }

            _, loss = sess.run([train, cost], feed_dict=train_feed_dict)

            # Periodic progress report on the current training batch.
            if batch % 100 == 0:
                train_summary, train_accuracy = sess.run([merged, accuracy], feed_dict=train_feed_dict)

                print("Step: %d, Loss: %f, Training Accuracy: %f" % (batch, loss, train_accuracy))
#                 writer.add_summary(train_summary, global_step=epoch)

        # Evaluate on the full test set once per epoch.
        test_feed_dict = {
            x: mnist.test.images,
            label: mnist.test.labels,
            learning_rate: epoch_learning_rate,
            training_flag: False
        }

        test_accuracy = sess.run(accuracy, feed_dict=test_feed_dict)
        print("Epoch: %d, Accuracy: %f" % (epoch + 1, test_accuracy))

        saver.save(sess=sess, save_path='./model/dense.ckpt')
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": null, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [] | |
} | |
], | |
"metadata": { | |
"kernelspec": { | |
"display_name": "Python 2", | |
"language": "python", | |
"name": "python2" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 2 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython2", | |
"version": "2.7.12" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 2 | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment