@chadrick-kwag
Created September 6, 2018 15:55
TensorFlow example demonstrating how to do lazy summary addition.
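The key behavior this example relies on: in TF 1.x, tf.summary.merge_all() only gathers the summary ops that already exist in the graph's summary collection at the moment it is called. Summary ops created afterwards must be merged explicitly with tf.summary.merge([...]). A minimal sketch of just that behavior, assuming TF 1.x (the tensor names here are illustrative, not from the gist):

import tensorflow as tf

a_summary = tf.summary.scalar("a", tf.constant(1.0))
merged_a = tf.summary.merge_all()         # collects only "a"
b_summary = tf.summary.scalar("b", tf.constant(2.0))
merged_b = tf.summary.merge([b_summary])  # "b" is merged by hand, lazily

The full example below applies the same pattern to keep a test-set summary out of the training summary op.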
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
# tf version: 1.10.0
def get_random_input_and_label(batch_size, class_size):
    # for demonstration purposes, reuse one random input/label pair
    random_input = np.random.rand(batch_size, 299, 299, 3)
    random_index = np.random.randint(0, class_size, batch_size)
    random_output = np.zeros((batch_size, class_size))
    for index, val in enumerate(random_index):
        random_output[index, val] = 1.0
    return random_input, random_output
class_size = 5
# build some model
input_ph = tf.placeholder(tf.float32, [None, 299, 299, 3])
onehot_labels_ph = tf.placeholder(tf.float32, [None, class_size])
# for details on the model, check out https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/contrib/slim/python/slim/nets/inception_v3.py
logits_ts, end_points = nets.inception.inception_v3(input_ph, num_classes=class_size)
prediction_ts = end_points['Predictions']
loss_ts = tf.losses.softmax_cross_entropy(
    onehot_labels=onehot_labels_ph, logits=logits_ts)
optimizer_op = tf.train.AdamOptimizer(0.001).minimize(loss_ts)
# calculating accuracy inside the graph
pred_argmax_ts = tf.argmax(prediction_ts, axis=1)
label_argmax_ts = tf.argmax(onehot_labels_ph, axis=1)
# convert bool to float value
compare_ts = tf.to_float(tf.equal(pred_argmax_ts, label_argmax_ts))
same_count = tf.reduce_sum(compare_ts)
number_of_batch = tf.to_float(tf.shape(pred_argmax_ts)[0])
accuracy_ts = same_count / number_of_batch
# during training we want to log the loss and the accuracy
loss_summary = tf.summary.scalar("loss/loss", loss_ts)
train_accuracy_summary = tf.summary.scalar("metric/acc", accuracy_ts)
# merge_all() gathers only the summaries defined so far:
# loss_summary and train_accuracy_summary
train_summary_op = tf.summary.merge_all()
# test_accuracy_summary is created after merge_all(), so it is not part of
# train_summary_op; merge it explicitly so it only runs on test batches
test_accuracy_summary = tf.summary.scalar("test/acc", accuracy_ts)
test_summary_op = tf.summary.merge([test_accuracy_summary])
initop = tf.global_variables_initializer()
with tf.Session() as sess:
    writer = tf.summary.FileWriter("tfsummary", session=sess)
    sess.run(initop)
    steps = 20
    train_input, train_label = get_random_input_and_label(4, class_size)
    test_input, test_label = get_random_input_and_label(4, class_size)
    for step in range(steps):
        train_summary, loss_val, prediction, _ = sess.run(
            [train_summary_op, loss_ts, prediction_ts, optimizer_op],
            feed_dict={input_ph: train_input, onehot_labels_ph: train_label})
        writer.add_summary(train_summary, global_step=step)
        print("train done for step={}".format(step))
        if step != 0 and step % 5 == 0:
            test_summary = sess.run(
                test_summary_op,
                feed_dict={input_ph: test_input, onehot_labels_ph: test_label})
            writer.add_summary(test_summary, global_step=step)
            print("test done at step={}".format(step))
    # flush any pending events to disk
    writer.close()
print("end of code")