# Gist by @anyuzx, created 2018-01-20
# Train a multi-dimensional LSTM (MDLSTM) to learn a fixed linear map.

from time import time

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

# multi_dimensional_rnn_while_loop is assumed to come from the MDLSTM
# implementation this gist builds on (e.g. md_lstm.py in
# philipperemy/tensorflow-multi-dimensional-lstm); adjust the import to match
# your local module.
from md_lstm import multi_dimensional_rnn_while_loop

anisotropy = False
learning_rate = 0.005
batch_size = 200
h = 10        # input height
w = 10        # input width
channels = 1
x = tf.placeholder(tf.float32, [batch_size, h, w, channels])
y = tf.placeholder(tf.float32, [batch_size, h, w, channels])
# fixed random linear map that the network has to learn
linear_map = np.random.rand(h, w)
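# Hypothetical sketch of next_batch_linear_map, which this gist calls but does
# not define. Assumption: each target image is the input image scaled
# elementwise by the fixed linear_map; the anisotropy flag is ignored here
# because its original meaning is not shown. Replace with the author's real
# helper if you have it.
def next_batch_linear_map(batch_size, h, w, linear_map, anisotropy):
    xs = np.random.rand(batch_size, h, w).astype(np.float32)
    ys = (xs * linear_map).astype(np.float32)  # broadcast (h, w) map over batch
    return xs, ys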
hidden_size = 100
# run the 2D MDLSTM over the (h, w) grid; sh=[1, 1] processes one pixel per step
rnn_out, _ = multi_dimensional_rnn_while_loop(rnn_size=hidden_size, input_data=x, sh=[1, 1])
# project the hidden states to a single output channel with a linear
# activation (activation_fn=None), i.e. a per-pixel linear readout
model_out = slim.fully_connected(inputs=rnn_out,
                                 num_outputs=1,
                                 activation_fn=None)
# use a slightly different loss function from the original code:
# the Frobenius norm of the prediction error over the whole batch
loss = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(y, model_out))))
grad_update = tf.train.AdamOptimizer(learning_rate).minimize(loss)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
sess.run(tf.global_variables_initializer())
# add TensorBoard logging (really useful)
train_writer = tf.summary.FileWriter('Tensorboard_out' + '/MDLSTM', sess.graph)
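# Not in the original gist: a minimal sketch of per-step loss logging with the
# TF 1.x summary API. To use it, fetch loss_summary in sess.run inside the
# training loop and pass the result to train_writer.add_summary(summ, i).
loss_summary = tf.summary.scalar('loss', loss)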
steps = 1000
mypredict_result = []
loss_series = []
for i in range(steps):
    batch = next_batch_linear_map(batch_size, h, w, linear_map, anisotropy)
    st = time()
    # add a trailing channel dimension: (batch, h, w) -> (batch, h, w, 1)
    batch_x = np.expand_dims(batch[0], axis=3)
    batch_y = np.expand_dims(batch[1], axis=3)
    mypredict, loss_val, _ = sess.run([model_out, loss, grad_update],
                                      feed_dict={x: batch_x, y: batch_y})
    mypredict_result.append([batch_x, batch_y, mypredict])
    print('steps = {0} | loss = {1:.3f} | time {2:.3f}'.format(str(i).zfill(3),
                                                               loss_val,
                                                               time() - st))
    loss_series.append([i + 1, loss_val])
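# Not in the original gist: plot the recorded training curve afterwards,
# assuming matplotlib is installed.
import matplotlib.pyplot as plt
step_idx, losses = zip(*loss_series)
plt.plot(step_idx, losses)
plt.xlabel('training step')
plt.ylabel('loss')
plt.savefig('mdlstm_loss.png')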