@SamuelMarks
Created January 6, 2017 05:28
diff --git a/tutorials/image/cifar10/cifar10.py b/tutorials/image/cifar10/cifar10.py
index d99ffb9..4edcf68 100644
--- a/tutorials/image/cifar10/cifar10.py
+++ b/tutorials/image/cifar10/cifar10.py
@@ -90,8 +90,8 @@ def _activation_summary(x):
   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
   # session. This helps the clarity of presentation on tensorboard.
   tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
-  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
+  tf.summary.histogram(tensor_name + '/activations', x)
+  tf.summary.scalar(tensor_name + '/sparsity',
                                        tf.nn.zero_fraction(x))
@@ -316,8 +316,8 @@ def _add_loss_summaries(total_loss):
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
     # as the original loss name.
-    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
-    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
+    tf.summary.scalar(l.op.name + ' (raw)', l)
+    tf.summary.scalar(l.op.name, loss_averages.average(l))

   return loss_averages_op
@@ -345,7 +345,7 @@ def train(total_loss, global_step):
                                   decay_steps,
                                   LEARNING_RATE_DECAY_FACTOR,
                                   staircase=True)
-  tf.contrib.deprecated.scalar_summary('learning_rate', lr)
+  tf.summary.scalar('learning_rate', lr)

   # Generate moving averages of all losses and associated summaries.
   loss_averages_op = _add_loss_summaries(total_loss)
@@ -360,12 +360,12 @@ def train(total_loss, global_step):
   # Add histograms for trainable variables.
   for var in tf.trainable_variables():
-    tf.contrib.deprecated.histogram_summary(var.op.name, var)
+    tf.summary.histogram(var.op.name, var)

   # Add histograms for gradients.
   for grad, var in grads:
     if grad is not None:
-      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
+      tf.summary.histogram(var.op.name + '/gradients', grad)

   # Track the moving averages of all trainable variables.
   variable_averages = tf.train.ExponentialMovingAverage(
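
For context on the rename: the old tf.contrib.deprecated.*_summary ops map onto the tf.summary module without the _summary suffix (tf.summary.histogram, tf.summary.scalar, and so on). A minimal, self-contained sketch of the new API, assuming TensorFlow 1.x; the placeholder tensor `acts` and the /tmp/cifar10_logs path are illustrative, not part of the patch:

import tensorflow as tf

acts = tf.placeholder(tf.float32, [None, 10], name='acts')
# tf.contrib.deprecated.histogram_summary -> tf.summary.histogram
tf.summary.histogram('activations', acts)
# tf.contrib.deprecated.scalar_summary -> tf.summary.scalar
tf.summary.scalar('sparsity', tf.nn.zero_fraction(acts))

summary_op = tf.summary.merge_all()  # gathers the two summaries above
with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/cifar10_logs', sess.graph)
  summary = sess.run(summary_op, feed_dict={acts: [[0., 1.] * 5]})
  writer.add_summary(summary, global_step=0)
  writer.close()
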
diff --git a/tutorials/image/cifar10/cifar10_input.py b/tutorials/image/cifar10/cifar10_input.py
index 7bfcb2e..38861d1 100644
--- a/tutorials/image/cifar10/cifar10_input.py
+++ b/tutorials/image/cifar10/cifar10_input.py
@@ -84,13 +84,13 @@ def read_cifar10(filename_queue):
   # The first bytes represent the label, which we convert from uint8->int32.
   result.label = tf.cast(
-      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
+      tf.strided_slice(record_bytes, [0], [label_bytes], [1]), tf.int32)

   # The remaining bytes after the label represent the image, which we reshape
   # from [depth * height * width] to [depth, height, width].
   depth_major = tf.reshape(
       tf.strided_slice(record_bytes, [label_bytes],
-                       [label_bytes + image_bytes]),
+                       [label_bytes + image_bytes], [1]),
       [result.depth, result.height, result.width])

   # Convert from [depth, height, width] to [height, width, depth].
   result.uint8image = tf.transpose(depth_major, [1, 2, 0])
@@ -132,7 +132,7 @@ def _generate_image_and_label_batch(image, label, min_queue_examples,
         capacity=min_queue_examples + 3 * batch_size)

   # Display the training images in the visualizer.
-  tf.contrib.deprecated.image_summary('images', images)
+  tf.summary.image('images', images)

   return images, tf.reshape(label_batch, [batch_size])
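
The strided-slice hunk above passes an explicit strides argument, which tf.strided_slice required at the time. Note that begin, end, and strides must each have one entry per input dimension; record_bytes is a flat 1-D tensor, so the strides must be [1] (a [1,1] strides would raise a shape error against the length-1 begin/end). A toy sketch of the slicing semantics, assuming TensorFlow 1.x, with made-up byte values:

import tensorflow as tf

record = tf.constant([7, 10, 20, 30, 40], dtype=tf.uint8)  # 1 label byte + 4 image bytes
label_bytes, image_bytes = 1, 4

# 1-D input, so begin/end/strides are all length-1 lists.
label = tf.cast(tf.strided_slice(record, [0], [label_bytes], [1]), tf.int32)
image = tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes], [1])

with tf.Session() as sess:
  print(sess.run(label))  # [7]
  print(sess.run(image))  # [10 20 30 40]
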
diff --git a/tutorials/image/cifar10/cifar10_multi_gpu_train.py b/tutorials/image/cifar10/cifar10_multi_gpu_train.py
index ed0ac6f..c83580e 100644
--- a/tutorials/image/cifar10/cifar10_multi_gpu_train.py
+++ b/tutorials/image/cifar10/cifar10_multi_gpu_train.py
@@ -93,7 +93,7 @@ def tower_loss(scope):
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
     # session. This helps the clarity of presentation on tensorboard.
     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
-    tf.contrib.deprecated.scalar_summary(loss_name, l)
+    tf.summary.scalar(loss_name, l)

   return total_loss
@@ -187,13 +187,13 @@ def train():
     grads = average_gradients(tower_grads)

     # Add a summary to track the learning rate.
-    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))
+    summaries.append(tf.summary.scalar('learning_rate', lr))

     # Add histograms for gradients.
     for grad, var in grads:
       if grad is not None:
         summaries.append(
-            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
+            tf.summary.histogram(var.op.name + '/gradients',
                                                     grad))

     # Apply the gradients to adjust the shared variables.
@@ -202,7 +202,7 @@ def train():
     # Add histograms for trainable variables.
     for var in tf.trainable_variables():
       summaries.append(
-          tf.contrib.deprecated.histogram_summary(var.op.name, var))
+          tf.summary.histogram(var.op.name, var))

     # Track the moving averages of all trainable variables.
     variable_averages = tf.train.ExponentialMovingAverage(
@@ -216,7 +216,7 @@ def train():
     saver = tf.train.Saver(tf.global_variables())

     # Build the summary operation from the last tower summaries.
-    summary_op = tf.contrib.deprecated.merge_summary(summaries)
+    summary_op = tf.summary.merge(summaries)

     # Build an initialization operation to run below.
     init = tf.global_variables_initializer()
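
Because the multi-GPU script accumulates its summary ops in an explicit Python list (one set per tower) rather than relying on the default GraphKeys.SUMMARIES collection, the merge becomes tf.summary.merge(summaries) rather than tf.summary.merge_all(). A rough sketch of that pattern, assuming TensorFlow 1.x; the two-tower loop and random loss below are stand-ins for the real towers:

import tensorflow as tf

summaries = []
for i in range(2):  # stand-in for the per-GPU tower loop
  with tf.name_scope('tower_%d' % i):
    loss = tf.reduce_mean(tf.random_normal([8]))
    summaries.append(tf.summary.scalar('loss', loss))

# Merge only the explicitly collected ops, as in the patched train().
summary_op = tf.summary.merge(summaries)

with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/cifar10_multi_gpu_logs', sess.graph)
  writer.add_summary(sess.run(summary_op), global_step=0)
  writer.close()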