author    Dan Mané <danmane@google.com>                     2016-10-31 10:38:33 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2016-10-31 11:50:28 -0700
commit    1962804adc32d9bbdf0512b968c32e4cd86ae791 (patch)
tree      4a4d922c7a9ae354660a4cc6cdba92e586857503
parent    ff529b9db13d959c7c93e67631efe7cbe3f86bc8 (diff)
Migrate tf.learn to use the new summary ops (tf.summary.X).
Since we now use node names instead of tags, and don't require argument uniqueness, some logic for checking tag uniqueness was removed. The constructed graphs will be identical, except that the summaries may get different node names.
Change: 137727808
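For reference, a minimal before/after sketch of the migration pattern applied throughout this change (TF 1.x-era API; the tensors are illustrative stand-ins):

    import tensorflow as tf

    loss = tf.constant(2.0, name="loss")

    # Deprecated ops: the first argument was a free-form tag string, and
    # tf.learn layered its own tag-uniqueness checks on top:
    #   tf.scalar_summary("loss", loss)

    # New ops: the name becomes part of the summary node's name, so the
    # graph scopes and uniquifies it -- no manual checks needed.
    tf.summary.scalar("loss", loss)

    # Likewise, tf.merge_all_summaries() becomes:
    merged = tf.summary.merge_all()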
-rw-r--r--  tensorflow/contrib/factorization/python/ops/gmm_ops.py                             2
-rw-r--r--  tensorflow/contrib/layers/python/layers/optimizers.py                             26
-rw-r--r--  tensorflow/contrib/layers/python/layers/summaries.py                              49
-rw-r--r--  tensorflow/contrib/layers/python/layers/summaries_test.py                         33
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py        4
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/composable_model.py               7
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dnn.py                           14
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/estimator_test.py                 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/head.py                          17
-rw-r--r--  tensorflow/contrib/learn/python/learn/graph_actions_test.py                        8
-rw-r--r--  tensorflow/contrib/learn/python/learn/learn_io/graph_io.py                        10
-rw-r--r--  tensorflow/contrib/learn/python/learn/models.py                                   18
-rw-r--r--  tensorflow/contrib/learn/python/learn/monitors.py                                  4
-rw-r--r--  tensorflow/contrib/learn/python/learn/monitors_test.py                             2
-rw-r--r--  tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py                         7
-rw-r--r--  tensorflow/contrib/training/python/training/bucket_ops.py                         13
-rw-r--r--  tensorflow/contrib/training/python/training/sampling_ops.py                        3
-rw-r--r--  tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py       7
18 files changed, 97 insertions, 129 deletions
diff --git a/tensorflow/contrib/factorization/python/ops/gmm_ops.py b/tensorflow/contrib/factorization/python/ops/gmm_ops.py
index 94aa38325d..336c4cc25e 100644
--- a/tensorflow/contrib/factorization/python/ops/gmm_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/gmm_ops.py
@@ -412,7 +412,7 @@ class GmmAlgorithm(object):
self._ll_op = []
for prior_probs in self._prior_probs:
self._ll_op.append(tf.reduce_sum(tf.log(prior_probs)))
- tf.scalar_summary('ll', tf.reduce_sum(self._ll_op))
+ tf.summary.scalar('ll', tf.reduce_sum(self._ll_op))
def gmm(inp, initial_clusters, num_clusters, random_seed,
diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index a31882fecb..8c06202f47 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
+from tensorflow.python import summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -28,7 +29,6 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
@@ -140,7 +140,7 @@ def optimize_loss(loss,
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` is wrong type.
- * `clip_gradients' is not float or callable.
+ * `clip_gradients` is not float or callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
"""
@@ -181,7 +181,7 @@ def optimize_loss(loss,
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
- logging_ops.scalar_summary("learning_rate", lr)
+ summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
@@ -234,8 +234,8 @@ def optimize_loss(loss,
gradients = _multiply_gradients(gradients, gradient_multipliers)
if "gradient_norm" in summaries:
- logging_ops.scalar_summary("global_norm/gradient_norm",
- clip_ops.global_norm(zip(*gradients)[0]))
+ summary.scalar("global_norm/gradient_norm",
+ clip_ops.global_norm(zip(*gradients)[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
@@ -248,7 +248,7 @@ def optimize_loss(loss,
# Add scalar summary for loss.
if "loss" in summaries:
- logging_ops.scalar_summary("loss", loss)
+ summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
@@ -259,15 +259,14 @@ def optimize_loss(loss,
if grad_values is not None:
if "gradients" in summaries:
- logging_ops.histogram_summary("gradients/" + variable.name,
- grad_values)
+ summary.histogram("gradients/" + variable.name, grad_values)
if "gradient_norm" in summaries:
- logging_ops.scalar_summary("gradient_norm/" + variable.name,
- clip_ops.global_norm([grad_values]))
+ summary.scalar("gradient_norm/" + variable.name,
+ clip_ops.global_norm([grad_values]))
if clip_gradients is not None and "gradient_norm" in summaries:
- logging_ops.scalar_summary("global_norm/clipped_gradient_norm",
- clip_ops.global_norm(zip(*gradients)[0]))
+ summary.scalar("global_norm/clipped_gradient_norm",
+ clip_ops.global_norm(zip(*gradients)[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(gradients,
@@ -357,8 +356,7 @@ def adaptive_clipping_fn(std_factor=2.,
# reports the max gradient norm for debugging
if report_summary:
- logging_ops.scalar_summary(
- "global_norm/adaptive_max_gradient_norm", max_norm)
+ summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = math_ops.select(norm < max_norm,
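A hedged usage sketch of optimize_loss after this change (the input tensors here are hypothetical stand-ins); each requested summary is now emitted via tf.summary, so its tag is the scoped node name:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 10])
    w = tf.Variable(tf.zeros([10, 1]))
    loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))
    global_step = tf.contrib.framework.get_or_create_global_step()

    train_op = tf.contrib.layers.optimize_loss(
        loss,
        global_step,
        learning_rate=0.1,
        optimizer="SGD",
        # Each entry emits a tf.summary op whose node name is scoped by
        # the surrounding graph (e.g. "OptimizeLoss/loss").
        summaries=["loss", "learning_rate", "gradient_norm"])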
diff --git a/tensorflow/contrib/layers/python/layers/summaries.py b/tensorflow/contrib/layers/python/layers/summaries.py
index 1ca19d16de..87bee372e1 100644
--- a/tensorflow/contrib/layers/python/layers/summaries.py
+++ b/tensorflow/contrib/layers/python/layers/summaries.py
@@ -21,26 +21,25 @@ from __future__ import print_function
import functools
import re
-import numpy as np
-
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
-from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import standard_ops
-__all__ = ['assert_summary_tag_unique', 'is_summary_tag_unique',
- 'summarize_tensor', 'summarize_activation', 'summarize_tensors',
- 'summarize_collection', 'summarize_variables', 'summarize_weights',
- 'summarize_biases', 'summarize_activations',]
+__all__ = [
+ 'summarize_tensor',
+ 'summarize_activation',
+ 'summarize_tensors',
+ 'summarize_collection',
+ 'summarize_variables',
+ 'summarize_weights',
+ 'summarize_biases',
+ 'summarize_activations',
+]
# TODO(wicke): add more unit tests for summarization functions.
-def assert_summary_tag_unique(tag):
- if not is_summary_tag_unique(tag):
- raise ValueError('Conflict with summary tag: %s already exists' % tag)
-
-
def _add_scalar_summary(tensor, tag=None):
"""Add a scalar summary operation for the tensor.
@@ -55,9 +54,8 @@ def _add_scalar_summary(tensor, tag=None):
ValueError: If the tag is already in use or the rank is not 0.
"""
tensor.get_shape().assert_has_rank(0)
- tag = tag or tensor.op.name
- assert_summary_tag_unique(tag)
- return standard_ops.scalar_summary(tag, tensor, name='%s_summary' % tag)
+ tag = tag or '%s_summary' % tensor.op.name
+ return summary.scalar(tag, tensor)
def _add_histogram_summary(tensor, tag=None):
@@ -73,25 +71,8 @@ def _add_histogram_summary(tensor, tag=None):
Raises:
ValueError: If the tag is already in use.
"""
- tag = tag or tensor.op.name
- assert_summary_tag_unique(tag)
- return standard_ops.histogram_summary(tag, tensor, name='%s_summary' % tag)
-
-
-def is_summary_tag_unique(tag):
- """Checks if a summary tag is unique.
-
- Args:
- tag: The tag to use
-
- Returns:
- True if the summary tag is unique.
- """
- existing_tags = [tensor_util.constant_value(summary.op.inputs[0])
- for summary in ops.get_collection(ops.GraphKeys.SUMMARIES)]
- existing_tags = [name.tolist() if isinstance(name, np.ndarray) else name
- for name in existing_tags]
- return tag.encode() not in existing_tags
+ tag = tag or '%s_summary' % tensor.op.name
+ return summary.histogram(tag, tensor)
def summarize_activation(op):
diff --git a/tensorflow/contrib/layers/python/layers/summaries_test.py b/tensorflow/contrib/layers/python/layers/summaries_test.py
index 78142e8ff8..e9c99bd657 100644
--- a/tensorflow/contrib/layers/python/layers/summaries_test.py
+++ b/tensorflow/contrib/layers/python/layers/summaries_test.py
@@ -23,24 +23,17 @@ import tensorflow as tf
class SummariesTest(tf.test.TestCase):
- def test_duplicate_tag(self):
- with self.test_session():
- var = tf.Variable([1, 2, 3])
- tf.contrib.layers.summarize_tensor(var)
- with self.assertRaises(ValueError):
- tf.contrib.layers.summarize_tensor(var)
-
def test_summarize_scalar_tensor(self):
with self.test_session():
scalar_var = tf.Variable(1)
summary_op = tf.contrib.layers.summarize_tensor(scalar_var)
- self.assertTrue(summary_op.op.type == 'ScalarSummary')
+ self.assertEquals(summary_op.op.type, 'ScalarSummary')
def test_summarize_multidim_tensor(self):
with self.test_session():
tensor_var = tf.Variable([1, 2, 3])
summary_op = tf.contrib.layers.summarize_tensor(tensor_var)
- self.assertTrue(summary_op.op.type == 'HistogramSummary')
+ self.assertEquals(summary_op.op.type, 'HistogramSummary')
def test_summarize_activation(self):
with self.test_session():
@@ -48,10 +41,10 @@ class SummariesTest(tf.test.TestCase):
op = tf.identity(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
- self.assertTrue(summary_op.op.type == 'HistogramSummary')
+ self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 1)
- self.assertTrue(u'SummaryTest/activation_summary' in names)
+ self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu(self):
with self.test_session():
@@ -59,11 +52,11 @@ class SummariesTest(tf.test.TestCase):
op = tf.nn.relu(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
- self.assertTrue(summary_op.op.type == 'HistogramSummary')
+ self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 2)
- self.assertTrue(u'SummaryTest/zeros_summary' in names)
- self.assertTrue(u'SummaryTest/activation_summary' in names)
+ self.assertIn(u'SummaryTest/zeros', names)
+ self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu6(self):
with self.test_session():
@@ -71,12 +64,12 @@ class SummariesTest(tf.test.TestCase):
op = tf.nn.relu6(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
- self.assertTrue(summary_op.op.type == 'HistogramSummary')
+ self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 3)
- self.assertTrue(u'SummaryTest/zeros_summary' in names)
- self.assertTrue(u'SummaryTest/sixes_summary' in names)
- self.assertTrue(u'SummaryTest/activation_summary' in names)
+ self.assertIn(u'SummaryTest/zeros', names)
+ self.assertIn(u'SummaryTest/sixes', names)
+ self.assertIn(u'SummaryTest/activation', names)
def test_summarize_collection_regex(self):
with self.test_session():
@@ -88,8 +81,8 @@ class SummariesTest(tf.test.TestCase):
summaries = tf.contrib.layers.summarize_collection('foo', r'Test[123]')
names = [op.op.name for op in summaries]
self.assertEquals(len(names), 2)
- self.assertTrue(u'Test2_summary' in names)
- self.assertTrue(u'Test3_summary' in names)
+ self.assertIn(u'Test2_summary', names)
+ self.assertIn(u'Test3_summary', names)
if __name__ == '__main__':
tf.test.main()
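The deleted test_duplicate_tag reflects the behavior change: summarizing the same tensor twice no longer raises. A sketch (exact node names are illustrative):

    import tensorflow as tf

    var = tf.Variable([1, 2, 3])
    tf.contrib.layers.summarize_tensor(var)  # e.g. node "Variable_summary"
    # Previously raised ValueError for the duplicate tag; now the graph
    # simply uniquifies the second node's name (e.g. a "_1" suffix).
    tf.contrib.layers.summarize_tensor(var)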
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
index d4f3c59634..46e1def77e 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
@@ -23,12 +23,12 @@ import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner
@@ -305,5 +305,5 @@ def enqueue_data(data,
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
- logging_ops.scalar_summary(summary_name, full)
+ summary.scalar(summary_name, full)
return queue
diff --git a/tensorflow/contrib/learn/python/learn/estimators/composable_model.py b/tensorflow/contrib/learn/python/learn/estimators/composable_model.py
index 7bd3d90ed6..4f4a3f7a3e 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/composable_model.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/composable_model.py
@@ -28,10 +28,10 @@ from tensorflow.contrib import layers
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.layers.python.layers import feature_column_ops
+from tensorflow.python import summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
@@ -318,9 +318,8 @@ class DNNComposableModel(_ComposableModel):
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
- logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
- nn.zero_fraction(value))
- logging_ops.histogram_summary("%s:activation" % tag, value)
+ summary.scalar("%s:fraction_of_zero_values" % tag, nn.zero_fraction(value))
+ summary.histogram("%s:activation" % tag, value)
def build_model(self, features, feature_columns, is_training):
"""See base class."""
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn.py b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
index 13f38d7d16..bd9c238b64 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -37,10 +37,10 @@ from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.losses.python.losses import loss_ops
+from tensorflow.python import summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
@@ -74,9 +74,8 @@ def _get_optimizer(optimizer):
def _add_hidden_layer_summary(value, tag):
- logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
- nn.zero_fraction(value))
- logging_ops.histogram_summary("%s:activation" % tag, value)
+ summary.scalar("%s:fraction_of_zero_values" % tag, nn.zero_fraction(value))
+ summary.histogram("%s:activation" % tag, value)
def _centered_bias(num_label_columns):
@@ -84,9 +83,8 @@ def _centered_bias(num_label_columns):
array_ops.zeros([num_label_columns]),
collections=[_CENTERED_BIAS, ops.GraphKeys.VARIABLES],
name=_CENTERED_BIAS_WEIGHT)
- logging_ops.scalar_summary(
- ["centered_bias %d" % cb for cb in range(num_label_columns)],
- array_ops.reshape(centered_bias, [-1]))
+ summary.scalar(["centered_bias %d" % cb for cb in range(num_label_columns)],
+ array_ops.reshape(centered_bias, [-1]))
return centered_bias
@@ -277,7 +275,7 @@ def _dnn_classifier_model_fn(features, labels, mode, params):
if enable_centered_bias:
train_ops.append(_centered_bias_step(labels, loss_fn, num_label_columns))
- logging_ops.scalar_summary("loss", loss)
+ summary.scalar("loss", loss)
return None, loss, control_flow_ops.group(*train_ops)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
index d133f72532..95a34664cb 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
@@ -439,7 +439,7 @@ class EstimatorTest(tf.test.TestCase):
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = tf.contrib.testing.simple_values_from_events(
- tf.contrib.testing.latest_events(est.model_dir), ['loss'])
+ tf.contrib.testing.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
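The expected key changes here because tf.summary.scalar derives the tag from the summary node's name, which inherits the enclosing name scope. A hedged illustration:

    import tensorflow as tf

    loss = tf.constant(2.0)
    with tf.name_scope("OptimizeLoss"):
        # Serialized tag is the scoped node name: "OptimizeLoss/loss".
        tf.summary.scalar("loss", loss)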
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index 4e9b932c07..6bc0ba871f 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -25,10 +25,10 @@ from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.session_bundle import exporter
+from tensorflow.python import summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
@@ -357,8 +357,8 @@ class _RegressionHead(_Head):
loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
- logging_ops.scalar_summary(_head_prefixed(self._head_name, "loss"),
- weighted_average_loss)
+ summary.scalar(
+ _head_prefixed(self._head_name, "loss"), weighted_average_loss)
return loss, centered_bias_step
def _eval_op(self, features, labels, logits=None, logits_input=None,
@@ -501,8 +501,8 @@ class _MultiClassHead(_Head):
loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
- logging_ops.scalar_summary(_head_prefixed(self._head_name, "loss"),
- weighted_average_loss)
+ summary.scalar(
+ _head_prefixed(self._head_name, "loss"), weighted_average_loss)
return loss, centered_bias_step
def _eval_op(self, features, labels, logits=None, logits_input=None,
@@ -728,9 +728,10 @@ def _centered_bias(logits_dimension, weight_collection):
array_ops.zeros([logits_dimension]),
collections=[weight_collection, ops.GraphKeys.VARIABLES],
name="centered_bias_weight")
- logging_ops.scalar_summary(
- ["centered_bias_%d" % cb for cb in range(logits_dimension)],
- array_ops.reshape(centered_bias, [-1]))
+
+ biases = array_ops.reshape(centered_bias, [-1])
+ for cb in range(logits_dimension):
+ summary.scalar("centered_bias_%d" % cb, biases[cb])
return centered_bias
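The list-to-loop rewrite above follows from the API difference: the legacy tf.scalar_summary accepted a list of tags paired with a 1-D values tensor, while tf.summary.scalar takes a single name and a scalar. A standalone sketch (shapes are illustrative):

    import tensorflow as tf

    centered_bias = tf.Variable(tf.zeros([3]))
    biases = tf.reshape(centered_bias, [-1])
    for i in range(3):
        # One summary op per element: "centered_bias_0", "centered_bias_1", ...
        tf.summary.scalar("centered_bias_%d" % i, biases[i])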
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions_test.py b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
index d6c93356ae..9a7306ad4a 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions_test.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
@@ -459,7 +459,7 @@ class GraphActionsTest(tf.test.TestCase):
with tf.control_dependencies(self._build_inference_graph()):
train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss_op = tf.constant(2.0)
- tf.scalar_summary('loss', loss_op)
+ tf.summary.scalar('loss', loss_op)
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions._monitored_train( # pylint: disable=protected-access
@@ -670,7 +670,7 @@ class GraphActionsTrainTest(tf.test.TestCase):
with tf.control_dependencies(self._build_inference_graph()):
train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss_op = tf.constant(2.0)
- tf.scalar_summary('loss', loss_op)
+ tf.summary.scalar('loss', loss_op)
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions.train(
@@ -691,7 +691,7 @@ class GraphActionsTrainTest(tf.test.TestCase):
with tf.control_dependencies(self._build_inference_graph()):
train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss_op = tf.constant(2.0)
- tf.scalar_summary('loss', loss_op)
+ tf.summary.scalar('loss', loss_op)
chief_exclusive_monitor = _BaseMonitorWrapper(False)
all_workers_monitor = _BaseMonitorWrapper(True)
loss = learn.graph_actions.train(
@@ -713,7 +713,7 @@ class GraphActionsTrainTest(tf.test.TestCase):
global_step = tf.contrib.framework.create_global_step(g)
train_op = tf.assign_add(global_step, 1)
loss_op = tf.constant(2.0)
- tf.scalar_summary('loss', loss_op)
+ tf.summary.scalar('loss', loss_op)
# Add explicit "local" init op to initialize all variables
# as there's no chief to init here.
init_op = variables.initialize_all_variables()
diff --git a/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py b/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
index 933c7456f5..1acd58d267 100644
--- a/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
+++ b/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
@@ -19,13 +19,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from tensorflow.python import summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as var_ops
@@ -660,10 +660,10 @@ def queue_parsed_features(parsed_features,
input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
# Add a summary op to debug if our feature queue is full or not.
- logging_ops.scalar_summary('queue/parsed_features/%s/fraction_of_%d_full' %
- (input_queue.name, feature_queue_capacity),
- math_ops.cast(input_queue.size(), dtypes.float32)
- * (1. / feature_queue_capacity))
+ summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
+ (input_queue.name, feature_queue_capacity),
+ math_ops.cast(input_queue.size(), dtypes.float32) *
+ (1. / feature_queue_capacity))
# Add multiple queue runners so that the queue is always full. Adding more
# than two queue-runners may hog the cpu on the worker to fill up the queue.
diff --git a/tensorflow/contrib/learn/python/learn/models.py b/tensorflow/contrib/learn/python/learn/models.py
index f22f6d18d4..8f89da1460 100644
--- a/tensorflow/contrib/learn/python/learn/models.py
+++ b/tensorflow/contrib/learn/python/learn/models.py
@@ -23,11 +23,11 @@ import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import losses_ops
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
@@ -80,8 +80,8 @@ def linear_regression(x, y, init_mean=None, init_stddev=1.0):
"""
with vs.variable_scope('linear_regression'):
scope_name = vs.get_variable_scope().name
- logging_ops.histogram_summary('%s.x' % scope_name, x)
- logging_ops.histogram_summary('%s.y' % scope_name, y)
+ summary.histogram('%s.x' % scope_name, x)
+ summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
y_shape = y.get_shape()
if len(y_shape) == 1:
@@ -102,8 +102,8 @@ def linear_regression(x, y, init_mean=None, init_stddev=1.0):
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
- logging_ops.histogram_summary('%s.weights' % scope_name, weights)
- logging_ops.histogram_summary('%s.bias' % scope_name, bias)
+ summary.histogram('%s.weights' % scope_name, weights)
+ summary.histogram('%s.bias' % scope_name, bias)
return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
@@ -139,8 +139,8 @@ def logistic_regression(x,
"""
with vs.variable_scope('logistic_regression'):
scope_name = vs.get_variable_scope().name
- logging_ops.histogram_summary('%s.x' % scope_name, x)
- logging_ops.histogram_summary('%s.y' % scope_name, y)
+ summary.histogram('%s.x' % scope_name, x)
+ summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
# Set up the requested initialization.
if init_mean is None:
@@ -157,8 +157,8 @@ def logistic_regression(x,
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
- logging_ops.histogram_summary('%s.weights' % scope_name, weights)
- logging_ops.histogram_summary('%s.bias' % scope_name, bias)
+ summary.histogram('%s.weights' % scope_name, weights)
+ summary.histogram('%s.bias' % scope_name, bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
diff --git a/tensorflow/contrib/learn/python/learn/monitors.py b/tensorflow/contrib/learn/python/learn/monitors.py
index 9fe4cb6468..d416d83560 100644
--- a/tensorflow/contrib/learn/python/learn/monitors.py
+++ b/tensorflow/contrib/learn/python/learn/monitors.py
@@ -565,8 +565,8 @@ class SummarySaver(EveryN):
Args:
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
- buffer, as output by TF summary methods like `scalar_summary` or
- `merge_all_summaries`.
+ buffer, as output by TF summary methods like `summary.scalar` or
+ `summary.merge_all`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
diff --git a/tensorflow/contrib/learn/python/learn/monitors_test.py b/tensorflow/contrib/learn/python/learn/monitors_test.py
index bbb2f71e74..ded0fb71ab 100644
--- a/tensorflow/contrib/learn/python/learn/monitors_test.py
+++ b/tensorflow/contrib/learn/python/learn/monitors_test.py
@@ -249,7 +249,7 @@ class MonitorsTest(tf.test.TestCase):
var = tf.Variable(0.0)
var.initializer.run()
tensor = tf.assign_add(var, 1.0)
- summary_op = tf.scalar_summary('my_summary', tensor)
+ summary_op = tf.summary.scalar('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op, save_steps=8,
diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
index 13310a3ec8..5200ef1d88 100644
--- a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
+++ b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
@@ -23,6 +23,7 @@ import collections
from six.moves import range
from tensorflow.contrib.lookup import lookup_ops
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
@@ -32,7 +33,6 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_sdca_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
@@ -381,9 +381,8 @@ class SdcaModel(object):
# empty_key (that will never collide with actual payloads).
empty_key=[0, 0])
- logging_ops.scalar_summary('approximate_duality_gap',
- self.approximate_duality_gap())
- logging_ops.scalar_summary('examples_seen', self._hashtable.size())
+ summary.scalar('approximate_duality_gap', self.approximate_duality_gap())
+ summary.scalar('examples_seen', self._hashtable.size())
def _symmetric_l1_regularization(self):
return self._options['symmetric_l1_regularization']
diff --git a/tensorflow/contrib/training/python/training/bucket_ops.py b/tensorflow/contrib/training/python/training/bucket_ops.py
index a9729c75fc..3f397d2401 100644
--- a/tensorflow/contrib/training/python/training/bucket_ops.py
+++ b/tensorflow/contrib/training/python/training/bucket_ops.py
@@ -26,6 +26,7 @@ import functools
import numpy as np
+from tensorflow.python import summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
@@ -35,7 +36,6 @@ from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
@@ -240,12 +240,11 @@ def bucket(tensors,
errors.OutOfRangeError, errors.CancelledError)))
for q in bucket_queues:
- logging_ops.scalar_summary(
- "bucket/%s/size" % q.name,
- math_ops.cast(top_queue.size(), dtypes.float32))
- logging_ops.scalar_summary(
- "bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
- math_ops.cast(top_queue.size(), dtypes.float32) * (1. / capacity))
+ summary.scalar("bucket/%s/size" % q.name,
+ math_ops.cast(top_queue.size(), dtypes.float32))
+ summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
+ math_ops.cast(top_queue.size(), dtypes.float32) *
+ (1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
diff --git a/tensorflow/contrib/training/python/training/sampling_ops.py b/tensorflow/contrib/training/python/training/sampling_ops.py
index 395840d13e..2efc50cb4e 100644
--- a/tensorflow/contrib/training/python/training/sampling_ops.py
+++ b/tensorflow/contrib/training/python/training/sampling_ops.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -426,7 +427,7 @@ def _conditional_batch(tensors, keep_input, batch_size, num_threads=10):
shapes=shapes_list,
dtypes=dtypes_list,
name='batched_queue')
- logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size())
+ summary.scalar('queue/%s/size' % final_q.name, final_q.size())
# Conditionally enqueue.
# Reshape enqueue op to match no_op's shape.
diff --git a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
index 2b198674af..4baece2e5d 100644
--- a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
+++ b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
@@ -27,6 +27,7 @@ import numbers
import six
+from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
@@ -35,7 +36,6 @@ from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
-from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training import queue_runner
@@ -1411,9 +1411,8 @@ def batch_sequences_with_states(input_key, input_sequences, input_context,
allow_small_batch=allow_small_batch)
barrier = stateful_reader.barrier
- logging_ops.scalar_summary(
- "queue/%s/ready_segment_batches_" % barrier.name,
- math_ops.cast(barrier.ready_size(), dtypes.float32))
+ summary.scalar("queue/%s/ready_segment_batches_" % barrier.name,
+ math_ops.cast(barrier.ready_size(), dtypes.float32))
q_runner = queue_runner.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op]*num_threads,