path: root/tensorflow/models/image/cifar10/cifar10.py
author    Dan Mané <danmane@google.com>    2016-11-30 12:50:52 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2016-11-30 13:04:47 -0800
commit c532a5b558a451d599190f1dbbdf68f08dfcaa88 (patch)
tree 03ad4a5a6fc3055c52467e184c5741423cc7ba80 /tensorflow/models/image/cifar10/cifar10.py
parent 36ee2ec0e6480910720530c67ab18da0312f24dc (diff)
Migrate tf summary ops to use tf.contrib.deprecated endpoints.
Change: 140639557
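The change is a mechanical rename at each summary call site. As a minimal sketch of the before/after pattern, distilled from the hunks below (the wrapper function here is illustrative, not part of the file; both endpoints take the same (tag, tensor) arguments, so only the module path changes):

    import tensorflow as tf

    def summarize_activations(x, tensor_name):
      # Before this commit the file called the top-level summary ops:
      #   tf.histogram_summary(tensor_name + '/activations', x)
      #   tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
      # After, the same ops are reached via the tf.contrib.deprecated endpoints:
      tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
      tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
                                           tf.nn.zero_fraction(x))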
Diffstat (limited to 'tensorflow/models/image/cifar10/cifar10.py')
-rw-r--r--  tensorflow/models/image/cifar10/cifar10.py  17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py
index 1c51b76f09..55c34ba84b 100644
--- a/tensorflow/models/image/cifar10/cifar10.py
+++ b/tensorflow/models/image/cifar10/cifar10.py
@@ -91,8 +91,9 @@ def _activation_summary(x):
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
- tf.histogram_summary(tensor_name + '/activations', x)
- tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
+ tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
+ tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
+ tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
@@ -316,8 +317,8 @@ def _add_loss_summaries(total_loss):
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
- tf.scalar_summary(l.op.name +' (raw)', l)
- tf.scalar_summary(l.op.name, loss_averages.average(l))
+ tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
+ tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
@@ -345,7 +346,7 @@ def train(total_loss, global_step):
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
- tf.scalar_summary('learning_rate', lr)
+ tf.contrib.deprecated.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
@@ -360,12 +361,12 @@ def train(total_loss, global_step):
# Add histograms for trainable variables.
for var in tf.trainable_variables():
- tf.histogram_summary(var.op.name, var)
+ tf.contrib.deprecated.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
- tf.histogram_summary(var.op.name + '/gradients', grad)
+ tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
@@ -394,5 +395,5 @@ def maybe_download_and_extract():
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
-
+
tarfile.open(filepath, 'r:gz').extractall(dest_directory)