@phizaz
Created July 8, 2018 14:32
TensorFlow summary v2 in eager mode
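A minimal, self-contained example: it enables eager execution in TensorFlow 1.x, builds a small Keras classifier on random data, and logs the training loss with the tf.contrib.summary ("summary v2") API so it can be viewed in TensorBoard.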
import tensorflow as tf
import numpy as np

tf.enable_eager_execution()

save_path = 'logs/test10'

graph = tf.Graph()
with graph.as_default():
    global_step = tf.train.create_global_step()
    writer = tf.contrib.summary.create_file_writer(save_path)
    with writer.as_default():
        # note: always_record_summaries() returns a context manager; the bare
        # call below does not change the recording condition by itself.
        # Summaries are actually recorded every step via
        # record_summaries_every_n_global_steps(1) inside train() further down.
        tf.contrib.summary.always_record_summaries()
# simulate dataset
fake_dataset = np.random.randn(1000, 100).astype(np.float32)
fake_label = np.random.randint(low=0, high=9, size=1000)

# preparing a fake dataset
with graph.as_default():
    x = tf.data.Dataset.from_tensor_slices(fake_dataset)
    y = tf.data.Dataset.from_tensor_slices(fake_label)
    data = tf.data.Dataset.zip((x, y))
    data = data.shuffle(10000)
    data = data.batch(32)
# define the model
with graph.as_default():
    with writer.as_default():
        # construct a simple classifier
        net = tf.keras.Sequential([
            tf.keras.layers.Dense(300, activation=tf.nn.relu),
            tf.keras.layers.Dense(10)
        ])
        optimizer = tf.train.AdamOptimizer(0.001)

def train(net, optimizer, x, y):
    with tf.contrib.eager.GradientTape() as tape:
        prediction = net(x)
        loss = tf.losses.sparse_softmax_cross_entropy(y, prediction)
    grads = tape.gradient(loss, net.variables)
    grads_vars = zip(grads, net.variables)
    optimizer.apply_gradients(
        grads_vars,
        global_step=tf.train.get_global_step()
    )
    # here is how you log every step (n=1)
    with tf.contrib.summary.record_summaries_every_n_global_steps(1):
        tf.contrib.summary.scalar('loss', loss)
    return loss

# start the training process
with graph.as_default():
    with writer.as_default():
        # initialize the summary writer
        tf.contrib.summary.initialize()
        # run until the dataset is exhausted
        for x, y in data:
            loss = train(net, optimizer, x, y)
            print(float(loss))
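After the loop finishes, the loss values are written as event files under logs/test10 and can be inspected with TensorBoard, e.g.:

    tensorboard --logdir logs/test10

If the process exits very quickly, the last events may still be buffered; calling tf.contrib.summary.flush() at the end of the script forces them out (this call is not in the original gist, but it belongs to the same tf.contrib.summary API used above).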