Something about an autoencoder-like network in TensorFlow; the net structure is a crazy scribble. 😂 Just practicing the file-queue feature in TF.
import tensorflow as tf


def read_image(filename_queue):
    # Read a whole JPEG from the queue and decode it.
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    img = tf.image.decode_jpeg(value)
    img.set_shape((256, 512, 3))
    # convert_image_dtype maps uint8 [0, 255] to float32 [0, 1]; rescale to [-1, 1].
    img = tf.image.convert_image_dtype(img, dtype=tf.float32)
    img = img * 2 - 1
    # Each 256x512 facades image holds two 256x256 halves side by side; split into a pair.
    img_a = img[:, :256, :]
    img_b = img[:, 256:, :]
    return img_a, img_b
def input_pipeline(batch_size, num_epochs=None):
    # match_filenames_once creates a local variable holding the file list,
    # which is why local_variables_initializer() is run below.
    filenames = tf.train.match_filenames_once('./datasets/facades/train/*.jpg')
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=num_epochs, shuffle=True)
    example_a, example_b = read_image(filename_queue)
    # Keep at least `min_after_dequeue` examples buffered so shuffling mixes well.
    min_after_dequeue = 100
    capacity = min_after_dequeue + 3 * batch_size
    example_batch = tf.train.shuffle_batch(
        [example_a, example_b], batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return example_batch
# ---- model: a scribbled encoder-decoder ----
img_a, img_b = input_pipeline(32, num_epochs=10)

# Encoder: 256x256x3 -> 32x32x8 via three conv + max-pool stages.
x = tf.layers.conv2d(img_a, 16, [3, 3], padding='same', activation=tf.nn.relu)
x = tf.layers.max_pooling2d(x, pool_size=[2, 2], strides=2)
x = tf.layers.conv2d(x, 8, [3, 3], padding='same', activation=tf.nn.relu)
x = tf.layers.max_pooling2d(x, pool_size=[2, 2], strides=2)
x = tf.layers.conv2d(x, 8, [3, 3], padding='same', activation=tf.nn.relu)
x = tf.layers.max_pooling2d(x, pool_size=[2, 2], strides=2)
x = tf.layers.conv2d(x, 8, [3, 3], padding='same', activation=tf.nn.relu)

# Decoder: three stride-2 transposed convolutions back up to 256x256.
x = tf.layers.conv2d_transpose(x, 8, [3, 3], [2, 2], padding='same')
x = tf.layers.conv2d(x, 8, [3, 3], padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 8, [3, 3], [2, 2], padding='same')
x = tf.layers.conv2d(x, 16, [3, 3], padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 16, [3, 3], [2, 2], padding='same')

# Note: the sigmoid output lies in [0, 1] while img_b was scaled to [-1, 1].
out = tf.layers.conv2d(x, 3, [3, 3], padding='same', activation=tf.nn.sigmoid)

# Pixel-wise MSE against the other image half, optimized with RMSProp.
cost = tf.reduce_mean(tf.pow(out - img_b, 2))
optimizer = tf.train.RMSPropOptimizer(0.0002).minimize(cost)
# ---- training loop ----
sess = tf.Session()
# local_variables_initializer() covers the epoch counter from string_input_producer
# and the variable created by match_filenames_once.
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    while not coord.should_stop():
        _, c = sess.run([optimizer, cost])
        print(c)
except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')
finally:
    coord.request_stop()

coord.join(threads)
sess.close()
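
To eyeball the reconstructions during training, one could periodically fetch a batch and write a sample image to disk. Below is a minimal sketch, assuming the graph above is already built and the queue runners are started; the save_sample helper, the samples/ output directory, and the use of Pillow are illustrative assumptions, not part of the original gist.

import os

import numpy as np
from PIL import Image  # assumption: Pillow is installed


def save_sample(sess, step, out_dir='samples'):
    """Fetch one batch and save input / target / reconstruction side by side."""
    os.makedirs(out_dir, exist_ok=True)
    # Fetching all three tensors in a single run() draws one consistent batch.
    a, b, o = sess.run([img_a, img_b, out])
    # img_a / img_b were scaled to [-1, 1]; the sigmoid output is already in [0, 1].
    panel = np.concatenate([(a[0] + 1) / 2, (b[0] + 1) / 2, o[0]], axis=1)
    panel = (np.clip(panel, 0, 1) * 255).astype(np.uint8)
    Image.fromarray(panel).save(os.path.join(out_dir, 'step_%06d.jpg' % step))

Calling save_sample(sess, step) every few hundred iterations inside the while loop gives a quick visual check of how the reconstructions evolve.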