## Tutorial >> https://www.tensorflow.org/get_started/mnist/beginners

import os
# Silence the TensorFlow CPU-feature warnings, see https://github.com/tensorflow/tensorflow/issues/7778
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

## --------- Utils --------- ##

# One should generally initialize weights with a small amount of noise for symmetry breaking, and to prevent 0 gradients.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Since we're using ReLU neurons, it is also good practice to initialize them with a slightly positive initial bias to avoid "dead neurons".
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Our convolutions use a stride of one and are zero-padded so that the output is the same size as the input.
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Our pooling is plain old max pooling over 2x2 blocks.
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
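
# Optional sanity-check sketch (not part of the tutorial's graph): with 'SAME'
# padding the output spatial size is ceil(input_size / stride), so conv2d
# (stride 1) preserves height and width, while max_pool_2x2 (stride 2)
# halves them. For example:
#     _x = tf.zeros([1, 28, 28, 1])
#     _h = max_pool_2x2(conv2d(_x, weight_variable([5, 5, 1, 32])))
#     print(_h.get_shape())   # -> (1, 14, 14, 32)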

## --------- /Utils --------- ##

## Load and extract the training and test data.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# mnist.train.images: 55000 images of 784 pixels each (28x28, but flattened as an array of 784 floats)
# mnist.train.labels: 55000 labels, each a 10-dimensional one_hot vector (digits 0..9)

# Input: one row per image in the batch (up to the 55000 training images), each a flattened 784-pixel array.
x = tf.placeholder(tf.float32, shape=[None, 784])
# Expected output for training (one_hot labels).
y_ = tf.placeholder(tf.float32, shape=[None, 10])

## --------- First Convolution --------- ##

# The convolution will compute 32 features for each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32].
# The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels.
W_conv1 = weight_variable([5, 5, 1, 32])
# We will also have a bias vector with a component for each output channel.
b_conv1 = bias_variable([32])
# To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height,
# and the final dimension corresponding to the number of color channels.
x_image = tf.reshape(x, [-1, 28, 28, 1])
# We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# The max_pool_2x2 call reduces the image size to 14x14.
h_pool1 = max_pool_2x2(h_conv1)
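
# Shape walkthrough (a sketch of what get_shape() would report):
#   x_image : (?, 28, 28, 1)
#   h_conv1 : (?, 28, 28, 32)   SAME padding keeps 28x28; 32 feature maps
#   h_pool1 : (?, 14, 14, 32)   2x2 max pool halves height and width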

## --------- /First Convolution --------- ##

## --------- Second Convolution --------- ##

# The second layer will have 64 features for each 5x5 patch.
# The third dimension matches the 32 outputs of conv1.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# Note the "x" is now the output of convolution 1 (h_pool1).
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Reduce to 7x7.
h_pool2 = max_pool_2x2(h_conv2)
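
# Shape walkthrough (a sketch):
#   h_pool1 : (?, 14, 14, 32)
#   h_conv2 : (?, 14, 14, 64)
#   h_pool2 : (?, 7, 7, 64)    i.e. 7 * 7 * 64 = 3136 values per image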

## --------- /Second Convolution --------- ##

## --------- Densely Connected --------- ##

# Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image.
# We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
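
# Shape check (a sketch): h_pool2_flat is (?, 3136) and W_fc1 is (3136, 1024),
# so the matmul yields h_fc1 with shape (?, 1024).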

## --------- /Densely Connected --------- ##

## --------- Dropout --------- ##

# To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output
# is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing.
# TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling.
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
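
# Illustrative sketch (hypothetical values): tf.nn.dropout zeroes each element
# with probability 1 - keep_prob and scales the survivors by 1 / keep_prob, so
# the expected sum is unchanged. E.g. with keep_prob = 0.5:
#     [1.0, 2.0, 3.0, 4.0]  might become  [2.0, 0.0, 6.0, 0.0]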

## --------- /Dropout --------- ##

## --------- Readout Layer --------- ##

# Finally, we add a layer, just like for the one-layer softmax regression above.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# The final model: unnormalized logits (the softmax is folded into the loss below).
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

## --------- /Readout Layer --------- ##

# Build the loss.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
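
# Equivalent hand-rolled formulation (a sketch; numerically less stable than the
# fused op above, shown only to make the math explicit):
#     -tf.reduce_mean(tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y_conv)), axis=1))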

# Train step, with AdamOptimizer replacing plain gradient descent.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Compute prediction accuracy (this is for logging the progress).
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
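
# Worked example (hypothetical batch of 4): if tf.argmax yields predicted classes
# [7, 2, 1, 0] and the true classes are [7, 2, 6, 0], then correct_prediction is
# [True, True, False, True] and accuracy is 3 / 4 = 0.75.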

with tf.Session() as sess:
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())

    # 20,000 training steps.
    for i in range(20000):
        # Get the next mini-batch of 50 examples.
        batch = mnist.train.next_batch(50)
        # Print the learning progress every 100 iterations.
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        # Train (this runs the back propagation).
        # Note: we include the additional parameter keep_prob in feed_dict to control the dropout rate.
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    # Training is done; evaluate on the test images (dropout disabled with keep_prob=1.0).
    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))