@qbx2
Last active August 19, 2016 10:06
import gzip
import tensorflow as tf
import struct
import numpy as np
import random
def one_hot_encode(i):
    # 10-way one-hot vector with index i set to 1
    ret = [0] * 10
    ret[i] = 1
    return ret
def load_idx1_file(filename):
    # IDX1 label file: 8-byte big-endian header (magic 0x00000801,
    # item count), then one unsigned byte per label.
    with gzip.GzipFile(filename) as f:
        magic_number, num_items = struct.unpack('>LL', f.read(8))
        assert magic_number == 0x0801
        print('Loading %s (%d)' % (filename, num_items))
        # Iterating over bytes yields ints in Python 3.
        return [one_hot_encode(i) for i in f.read(num_items)]
def load_idx3_file(filename):
    # IDX3 image file: 16-byte big-endian header (magic 0x00000803,
    # image count, rows, cols), then rows*cols unsigned bytes per image.
    with gzip.GzipFile(filename) as f:
        magic_number, num_images, num_rows, num_cols = struct.unpack('>LLLL', f.read(4 * 4))
        assert magic_number == 0x0803
        print('Loading %s (%d, %dx%d)' % (filename, num_images, num_rows, num_cols))
        # np.uint8 reads pixels as unsigned values directly, so the signed-byte
        # masking (np.byte & 0xff) is unnecessary; scale to [0, 1].
        imgs = [np.frombuffer(f.read(num_rows * num_cols), dtype=np.uint8) / 255.
                for _ in range(num_images)]
        return imgs
training_set_labels = load_idx1_file('mnist_data/train-labels-idx1-ubyte.gz')
training_set_images = load_idx3_file('mnist_data/train-images-idx3-ubyte.gz')
training_set = list(zip(training_set_images, training_set_labels))
test_set_labels = load_idx1_file('mnist_data/t10k-labels-idx1-ubyte.gz')
test_set_images = load_idx3_file('mnist_data/t10k-images-idx3-ubyte.gz')
test_set = list(zip(test_set_images, test_set_labels))
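# Optional sanity check: the standard MNIST split is 60,000 training and
# 10,000 test examples. These assertions assume that split; drop them if
# you point the loaders at a different dataset.
assert len(training_set) == 60000, len(training_set)
assert len(test_set) == 10000, len(test_set)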
def weight_variable(shape):
    # Small truncated-normal init breaks symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    # Slightly positive bias keeps ReLU units active at the start.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# model
placeholder_x = tf.placeholder('float', shape=[None, 28 * 28]) # flattened 28x28 images
placeholder_y = tf.placeholder('float', shape=[None, 10]) # one-hot labels
x_image = tf.reshape(placeholder_x, [-1, 28, 28, 1]) # NHWC: batch, height, width, channels
# conv1
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# conv2
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
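# Shape check: 'SAME' convolutions preserve the 28x28 spatial size, and each
# 2x2 max-pool halves it, so h_pool1 is 14x14x32 and h_pool2 is 7x7x64.
# That product, 7 * 7 * 64, is the fan-in of the first fully connected layer.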
# fc1
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# fc2
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
pred = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
loss = -tf.reduce_sum(placeholder_y * tf.log(pred)) # cross entropy, summed over the batch
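# Note: log(softmax) can underflow to -inf on a confidently wrong prediction.
# A numerically safer sketch using the same-era TF API (keeping the logits
# pre-softmax instead of applying tf.nn.softmax first) would be:
#   logits = tf.matmul(h_fc1, W_fc2) + b_fc2
#   loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits, placeholder_y))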
train_step = tf.train.AdamOptimizer(.0001).minimize(loss)
# fraction of examples whose predicted class (argmax of pred) matches the label
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(placeholder_y, 1), tf.argmax(pred, 1)), tf.float32))
sess = tf.Session()
sess.run(tf.initialize_all_variables()) # tf.global_variables_initializer() on TF >= 0.12
for i in range(10000):
    x, y = zip(*random.sample(training_set, 100)) # sample a mini-batch of 100 examples
    _, tmp_loss, tmp_accuracy = sess.run([train_step, loss, accuracy], feed_dict={placeholder_x: x, placeholder_y: y})
    if i % 100 == 0:
        # loss is summed over the batch, so divide by the batch size
        print('training avg_loss: {}, avg_accuracy: {}'.format(tmp_loss / 100., tmp_accuracy))
# Evaluate on the full 10,000-image test set in a single batch.
x, y = zip(*test_set)
tmp_loss, tmp_accuracy = sess.run([loss, accuracy], feed_dict={placeholder_x: x, placeholder_y: y})
print('test avg_loss: {}, avg_accuracy: {}'.format(tmp_loss / len(test_set), tmp_accuracy))
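# Quick demo (optional sketch): classify one held-out image with the trained
# network. pred yields one row of 10 class probabilities per input image.
demo_image, demo_label = test_set[0]
demo_probs = sess.run(pred, feed_dict={placeholder_x: [demo_image]})
print('demo prediction: {}, true label: {}'.format(int(np.argmax(demo_probs[0])), int(np.argmax(demo_label))))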