@naoyashiga · Created March 5, 2017 06:12
AutoEncoder for MNIST
# -*- coding: utf-8 -*-
# Inspired by:
# [TensorFlow-Examples/autoencoder.py at master · aymericdamien/TensorFlow-Examples](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py)
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier initialization of network weights."""
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32)
class AutoEncoder(object):
    def __init__(self, activation=tf.nn.tanh):
        self.activation = activation

    def encoder(self, x):
        # Single fully connected encoding layer (no activation applied)
        layer_1 = tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1'])
        return layer_1

    def decoder(self, z):
        # Single fully connected decoding layer with the configured activation (tanh by default)
        layer_1 = self.activation(tf.add(tf.matmul(z, weights['decoder_h1']), biases['decoder_b1']))
        return layer_1
    def train(self, optimizer, cost):
        # tf.summary.scalar('loss', cost)
        # summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter('data', graph=sess.graph)
        total_batch = int(mnist.train.num_examples / batch_size)
        saver = tf.train.Saver(max_to_keep=10)
        for epoch in range(training_epochs):
            for i in range(total_batch):
                batch_xs, _ = mnist.train.next_batch(batch_size)
                # Run one optimization step and fetch the reconstruction cost
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
                saver.save(sess, "checkpoints/model", global_step=epoch)
                # summary_str = sess.run(summary_op, feed_dict=feed_dict)
                # summary_writer.add_summary(summary_str, epoch)
        print("Optimization Finished!")
if __name__ == '__main__':
    learning_rate = 0.1
    training_epochs = 10000
    batch_size = 256
    display_step = 50
    examples_to_show = 10

    # Network size: one hidden layer of 256 units, 28x28 = 784 input pixels
    n_x_hidden_1 = 256
    n_x_input = 28 * 28

    X = tf.placeholder("float", [None, n_x_input])

    weights = {
        'encoder_h1': tf.Variable(xavier_init(n_x_input, n_x_hidden_1)),
        'decoder_h1': tf.Variable(xavier_init(n_x_hidden_1, n_x_input)),
        # 'encoder_h1': tf.Variable(tf.random_normal([n_x_input, n_x_hidden_1])),
        # 'decoder_h1': tf.Variable(tf.random_normal([n_x_hidden_1, n_x_input])),
    }
    biases = {
        'encoder_b1': tf.Variable(tf.zeros([n_x_hidden_1])),
        'decoder_b1': tf.Variable(tf.zeros([n_x_input])),
    }

    ae = AutoEncoder()

    encoder_op = ae.encoder(X)
    decoder_op_x = ae.decoder(encoder_op)

    y_pred = decoder_op_x
    y_true = X

    # Mean squared reconstruction error
    cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))

    # optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # optimizer = tf.train.AdadeltaOptimizer(learning_rate).minimize(cost)
    optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(cost)
    # optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    ae.train(optimizer, cost)
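    # --- Visualize reconstructions (not part of the original gist) ---
    # A minimal sketch, assuming matplotlib is installed; it reuses the
    # otherwise-unused `examples_to_show` to compare test digits with their
    # reconstructions after training.
    import matplotlib.pyplot as plt

    test_xs = mnist.test.images[:examples_to_show]
    reconstructed = sess.run(y_pred, feed_dict={X: test_xs})

    f, a = plt.subplots(2, examples_to_show, figsize=(examples_to_show, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(test_xs[i], (28, 28)))         # original digit
        a[1][i].imshow(np.reshape(reconstructed[i], (28, 28)))   # reconstruction
    plt.show()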