author	Anna R <annarev@google.com>	2018-10-01 11:53:27 -0700
committer	TensorFlower Gardener <gardener@tensorflow.org>	2018-10-01 11:58:12 -0700
commit	f1fd53748b99532b2572b8909efcd4f5c06ce28d (patch)
tree	16ae93334d4b10a3bf7cfb1f088efd725857fb54 /tensorflow/python/ops
parent	7cabc6be4e32dfb7f42c7f5e33549984bfdb68a3 (diff)
Updating function and class tf_export decorators for endpoints according to
https://github.com/tensorflow/community/pull/16.

In addition to the changes in the doc, I made the following updates (these
changes make sense to me and I didn't notice them when compiling the doc):

* deprecate saved_model.builder.SavedModelBuilder - replaced with saved_model.SavedModelBuilder
* deprecate python_io.tf_record_iterator - replaced with io.tf_record_iterator
* deprecate python_io.TFRecordWriter - replaced with io.TFRecordWriter
* move reduce_join to tf.string

PiperOrigin-RevId: 215253944
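Every change in this patch follows the same two-decorator pattern: tf_export
lists the new canonical endpoint first and keeps the old top-level name as an
alias, while deprecation.deprecated_endpoints marks the old name so that calls
through it are flagged as deprecated. A minimal sketch of the pattern, using
the sparse_mask change from array_ops.py below (decorator arguments exactly as
they appear in the diff; the function body is elided):

    from tensorflow.python.util import deprecation
    from tensorflow.python.util.tf_export import tf_export


    @tf_export("sparse.mask", "sparse_mask")          # new canonical name first, old alias second
    @deprecation.deprecated_endpoints("sparse_mask")  # the old alias warns when called
    def sparse_mask(a, mask_indices, name=None):
      ...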
Diffstat (limited to 'tensorflow/python/ops')
-rw-r--r--  tensorflow/python/ops/array_ops.py               44
-rw-r--r--  tensorflow/python/ops/candidate_sampling_ops.py   8
-rw-r--r--  tensorflow/python/ops/check_ops.py               63
-rw-r--r--  tensorflow/python/ops/clip_ops.py                 8
-rw-r--r--  tensorflow/python/ops/confusion_matrix.py         4
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py         2
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py           17
-rw-r--r--  tensorflow/python/ops/init_ops.py                 5
-rw-r--r--  tensorflow/python/ops/linalg_ops.py              15
-rw-r--r--  tensorflow/python/ops/lookup_ops.py               2
-rw-r--r--  tensorflow/python/ops/manip_ops.py                4
-rw-r--r--  tensorflow/python/ops/math_ops.py               145
-rw-r--r--  tensorflow/python/ops/nn_impl.py                  6
-rw-r--r--  tensorflow/python/ops/nn_ops.py                   8
-rw-r--r--  tensorflow/python/ops/numerics.py                 4
-rw-r--r--  tensorflow/python/ops/parsing_ops.py             18
-rw-r--r--  tensorflow/python/ops/random_ops.py              19
-rw-r--r--  tensorflow/python/ops/sparse_ops.py             107
-rw-r--r--  tensorflow/python/ops/special_math_ops.py         4
-rw-r--r--  tensorflow/python/ops/string_ops.py               7
20 files changed, 300 insertions(+), 190 deletions(-)
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index a7f57e94e3..9f5149d5ac 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -1204,7 +1204,8 @@ def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
return _apply_mask_1d(tensor, mask, axis)
-@tf_export("sparse_mask")
+@tf_export("sparse.mask", "sparse_mask")
+@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
@@ -1226,7 +1227,7 @@ def sparse_mask(a, mask_indices, name=None):
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
- b = tf.sparse_mask(a, [12, 45])
+ b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
@@ -1382,7 +1383,7 @@ def transpose(a, perm=None, name="transpose", conjugate=False):
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
- # (this common operation has a shorthand `matrix_transpose`)
+ # (this common operation has a shorthand `linalg.transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
@@ -1421,7 +1422,8 @@ def transpose(a, perm=None, name="transpose", conjugate=False):
# pylint: disable=invalid-name
-@tf_export("matrix_transpose", "linalg.transpose")
+@tf_export("linalg.transpose", "matrix_transpose")
+@deprecation.deprecated_endpoints("matrix_transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
@@ -1429,19 +1431,19 @@ def matrix_transpose(a, name="matrix_transpose", conjugate=False):
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
- tf.matrix_transpose(x) # [[1, 4],
+ tf.linalg.transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
- tf.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
+ tf.linalg.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
- # tf.matrix_transpose(x) is shape [1, 2, 4, 3]
+ # tf.linalg.transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
@@ -1452,14 +1454,14 @@ def matrix_transpose(a, name="matrix_transpose", conjugate=False):
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
- tf.matmul(matrix, tf.matrix_transpose(b))
+ tf.matmul(matrix, tf.linalg.transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
- TensorFlow does not support strides, `matrix_transposes` return a new tensor
+ TensorFlow does not support strides, so `linalg.transpose` returns a new tensor
with the items permuted.
@end_compatibility
@@ -1467,7 +1469,7 @@ def matrix_transpose(a, name="matrix_transpose", conjugate=False):
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
- to tf.conj(tf.matrix_transpose(input)).
+ to tf.conj(tf.linalg.transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
@@ -1756,7 +1758,8 @@ def _normalize_sparse_shape(shape, name):
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
-@tf_export("sparse_placeholder")
+@tf_export("sparse.placeholder", "sparse_placeholder")
+@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
@@ -1767,8 +1770,8 @@ def sparse_placeholder(dtype, shape=None, name=None):
For example:
```python
- x = tf.sparse_placeholder(tf.float32)
- y = tf.sparse_reduce_sum(x)
+ x = tf.sparse.placeholder(tf.float32)
+ y = tf.sparse.reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
@@ -2250,7 +2253,8 @@ def required_space_to_batch_paddings(input_shape,
return result_paddings, result_crops
-@tf_export("space_to_batch")
+@tf_export("nn.space_to_batch", "space_to_batch")
+@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(
input,
@@ -2264,7 +2268,8 @@ def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=r
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
-@tf_export("space_to_depth")
+@tf_export("nn.space_to_depth", "space_to_depth")
+@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
@@ -2272,7 +2277,8 @@ def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint:
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
-@tf_export("depth_to_space")
+@tf_export("nn.depth_to_space", "depth_to_space")
+@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
@@ -2747,7 +2753,8 @@ def batch_gather(params, indices, name=None):
@tf_export("quantize_v2")
@deprecation.deprecated(
"2017-10-25",
- "`tf.quantize_v2` is deprecated, please use `tf.quantize` instead.")
+ "`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
+ "instead.") # pylint: disable=missing-docstring
def quantize_v2(input, # pylint: disable=redefined-builtin
min_range,
max_range,
@@ -2769,7 +2776,8 @@ quantize_v2.__doc__ = """Please use `tf.quantize` instead."""
# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
# tf.quantize_v2 in next version of TensorFlow.
-@tf_export("quantize")
+@tf_export("quantization.quantize", "quantize")
+@deprecation.deprecated_endpoints("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
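From the caller's side, both endpoints keep working during the transition; the
following is a hedged usage sketch of the sparse_mask change above, assuming
the 1.x API at this commit (the IndexedSlices construction is illustrative,
with indices and shapes chosen to match the docstring example):

    import tensorflow as tf

    values = tf.reshape(tf.range(40, dtype=tf.float32), [4, 10])
    a = tf.IndexedSlices(values, tf.constant([12, 26, 37, 45], dtype=tf.int64))
    b = tf.sparse.mask(a, [12, 45])   # new canonical endpoint, no warning
    c = tf.sparse_mask(a, [12, 45])   # old alias, expected to log a deprecation warning
    # per the docstring above, b.indices evaluates to [26, 37]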
diff --git a/tensorflow/python/ops/candidate_sampling_ops.py b/tensorflow/python/ops/candidate_sampling_ops.py
index 9ea1ea9c92..98dde995c9 100644
--- a/tensorflow/python/ops/candidate_sampling_ops.py
+++ b/tensorflow/python/ops/candidate_sampling_ops.py
@@ -23,10 +23,12 @@ from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops # pylint: disable=unused-import
from tensorflow.python.ops import gen_candidate_sampling_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
-@tf_export('nn.uniform_candidate_sampler')
+@tf_export('random.uniform_candidate_sampler', 'nn.uniform_candidate_sampler')
+@deprecation.deprecated_endpoints('nn.uniform_candidate_sampler')
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a uniform base distribution.
@@ -82,7 +84,9 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
seed2=seed2, name=name)
-@tf_export('nn.log_uniform_candidate_sampler')
+@tf_export('random.log_uniform_candidate_sampler',
+ 'nn.log_uniform_candidate_sampler')
+@deprecation.deprecated_endpoints('nn.log_uniform_candidate_sampler')
def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a log-uniform (Zipfian) base distribution.
diff --git a/tensorflow/python/ops/check_ops.py b/tensorflow/python/ops/check_ops.py
index c3cf6e61f2..d607f1d9fb 100644
--- a/tensorflow/python/ops/check_ops.py
+++ b/tensorflow/python/ops/check_ops.py
@@ -36,6 +36,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
NUMERIC_TYPES = frozenset(
@@ -91,7 +92,8 @@ def _shape_and_dtype_str(tensor):
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
-@tf_export('assert_proper_iterable')
+@tf_export('debugging.assert_proper_iterable', 'assert_proper_iterable')
+@deprecation.deprecated_endpoints('assert_proper_iterable')
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
@@ -119,7 +121,8 @@ def assert_proper_iterable(values):
'Expected argument "values" to be iterable. Found: %s' % type(values))
-@tf_export('assert_negative')
+@tf_export('debugging.assert_negative', 'assert_negative')
+@deprecation.deprecated_endpoints('assert_negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < 0` holds element-wise.
@@ -160,7 +163,8 @@ def assert_negative(x, data=None, summarize=None, message=None, name=None):
return assert_less(x, zero, data=data, summarize=summarize)
-@tf_export('assert_positive')
+@tf_export('debugging.assert_positive', 'assert_positive')
+@deprecation.deprecated_endpoints('assert_positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > 0` holds element-wise.
@@ -200,7 +204,8 @@ def assert_positive(x, data=None, summarize=None, message=None, name=None):
return assert_less(zero, x, data=data, summarize=summarize)
-@tf_export('assert_non_negative')
+@tf_export('debugging.assert_non_negative', 'assert_non_negative')
+@deprecation.deprecated_endpoints('assert_non_negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x >= 0` holds element-wise.
@@ -242,7 +247,8 @@ def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
return assert_less_equal(zero, x, data=data, summarize=summarize)
-@tf_export('assert_non_positive')
+@tf_export('debugging.assert_non_positive', 'assert_non_positive')
+@deprecation.deprecated_endpoints('assert_non_positive')
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= 0` holds element-wise.
@@ -284,7 +290,7 @@ def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
return assert_less_equal(x, zero, data=data, summarize=summarize)
-@tf_export('assert_equal')
+@tf_export('debugging.assert_equal', 'assert_equal')
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x == y` holds element-wise.
@@ -384,7 +390,8 @@ def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_none_equal')
+@tf_export('debugging.assert_none_equal', 'assert_none_equal')
+@deprecation.deprecated_endpoints('assert_none_equal')
def assert_none_equal(
x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x != y` holds for all elements.
@@ -435,7 +442,8 @@ def assert_none_equal(
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_near')
+@tf_export('debugging.assert_near', 'assert_near')
+@deprecation.deprecated_endpoints('assert_near')
def assert_near(
x, y, rtol=None, atol=None, data=None, summarize=None, message=None,
name=None):
@@ -513,7 +521,7 @@ def assert_near(
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_less')
+@tf_export('debugging.assert_less', 'assert_less')
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < y` holds element-wise.
@@ -561,7 +569,8 @@ def assert_less(x, y, data=None, summarize=None, message=None, name=None):
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_less_equal')
+@tf_export('debugging.assert_less_equal', 'assert_less_equal')
+@deprecation.deprecated_endpoints('assert_less_equal')
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= y` holds element-wise.
@@ -609,7 +618,7 @@ def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_greater')
+@tf_export('debugging.assert_greater', 'assert_greater')
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > y` holds element-wise.
@@ -657,7 +666,8 @@ def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_greater_equal')
+@tf_export('debugging.assert_greater_equal', 'assert_greater_equal')
+@deprecation.deprecated_endpoints('assert_greater_equal')
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
name=None):
"""Assert the condition `x >= y` holds element-wise.
@@ -755,7 +765,7 @@ def _assert_rank_condition(
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_rank')
+@tf_export('debugging.assert_rank', 'assert_rank')
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank`.
@@ -817,7 +827,8 @@ def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
return assert_op
-@tf_export('assert_rank_at_least')
+@tf_export('debugging.assert_rank_at_least', 'assert_rank_at_least')
+@deprecation.deprecated_endpoints('assert_rank_at_least')
def assert_rank_at_least(
x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank` or higher.
@@ -948,7 +959,8 @@ def _assert_ranks_condition(
return control_flow_ops.Assert(condition, data, summarize=summarize)
-@tf_export('assert_rank_in')
+@tf_export('debugging.assert_rank_in', 'assert_rank_in')
+@deprecation.deprecated_endpoints('assert_rank_in')
def assert_rank_in(
x, ranks, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank in `ranks`.
@@ -1010,7 +1022,8 @@ def assert_rank_in(
return assert_op
-@tf_export('assert_integer')
+@tf_export('debugging.assert_integer', 'assert_integer')
+@deprecation.deprecated_endpoints('assert_integer')
def assert_integer(x, message=None, name=None):
"""Assert that `x` is of integer dtype.
@@ -1048,7 +1061,8 @@ def assert_integer(x, message=None, name=None):
return control_flow_ops.no_op('statically_determined_was_integer')
-@tf_export('assert_type')
+@tf_export('debugging.assert_type', 'assert_type')
+@deprecation.deprecated_endpoints('assert_type')
def assert_type(tensor, tf_type, message=None, name=None):
"""Statically asserts that the given `Tensor` is of the specified type.
@@ -1095,12 +1109,14 @@ def _get_diff_for_monotonic_comparison(x):
return control_flow_ops.cond(is_shorter_than_two, short_result, diff)
-@tf_export('is_numeric_tensor')
+@tf_export('debugging.is_numeric_tensor', 'is_numeric_tensor')
+@deprecation.deprecated_endpoints('is_numeric_tensor')
def is_numeric_tensor(tensor):
return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES
-@tf_export('is_non_decreasing')
+@tf_export('debugging.is_non_decreasing', 'is_non_decreasing')
+@deprecation.deprecated_endpoints('is_non_decreasing')
def is_non_decreasing(x, name=None):
"""Returns `True` if `x` is non-decreasing.
@@ -1127,7 +1143,8 @@ def is_non_decreasing(x, name=None):
return math_ops.reduce_all(math_ops.less_equal(zero, diff))
-@tf_export('is_strictly_increasing')
+@tf_export('debugging.is_strictly_increasing', 'is_strictly_increasing')
+@deprecation.deprecated_endpoints('is_strictly_increasing')
def is_strictly_increasing(x, name=None):
"""Returns `True` if `x` is strictly increasing.
@@ -1202,7 +1219,8 @@ def _assert_same_base_type(items, expected_type=None):
return expected_type
-@tf_export('assert_same_float_dtype')
+@tf_export('debugging.assert_same_float_dtype', 'assert_same_float_dtype')
+@deprecation.deprecated_endpoints('assert_same_float_dtype')
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
@@ -1231,7 +1249,8 @@ def assert_same_float_dtype(tensors=None, dtype=None):
return dtype
-@tf_export('assert_scalar')
+@tf_export('debugging.assert_scalar', 'assert_scalar')
+@deprecation.deprecated_endpoints('assert_scalar')
def assert_scalar(tensor, name=None):
with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor, name=name_scope)
diff --git a/tensorflow/python/ops/clip_ops.py b/tensorflow/python/ops/clip_ops.py
index 29468431b3..45516068f4 100644
--- a/tensorflow/python/ops/clip_ops.py
+++ b/tensorflow/python/ops/clip_ops.py
@@ -30,6 +30,7 @@ from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@@ -76,8 +77,8 @@ def clip_by_value(t, clip_value_min, clip_value_max,
return t_max
# TODO(scottzhu): switch to use the new implementation in 2 weeks.
- # return gen_math_ops.clip_by_value(
- # t, clip_value_min, clip_value_max, name=name)
+ # return gen_math_ops.clip_by_value(
+ # t, clip_value_min, clip_value_max, name=name)
# TODO(scottzhu): switch to use the new implementation in 2 weeks.
@@ -159,7 +160,8 @@ def clip_by_norm(t, clip_norm, axes=None, name=None):
return tclip
-@tf_export("global_norm")
+@tf_export("linalg.global_norm", "global_norm")
+@deprecation.deprecated_endpoints("global_norm")
def global_norm(t_list, name=None):
"""Computes the global norm of multiple tensors.
diff --git a/tensorflow/python/ops/confusion_matrix.py b/tensorflow/python/ops/confusion_matrix.py
index c09154129f..8259142456 100644
--- a/tensorflow/python/ops/confusion_matrix.py
+++ b/tensorflow/python/ops/confusion_matrix.py
@@ -26,6 +26,7 @@ from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@@ -89,7 +90,8 @@ def remove_squeezable_dimensions(
return labels, predictions
-@tf_export('confusion_matrix')
+@tf_export('train.confusion_matrix', 'confusion_matrix')
+@deprecation.deprecated_endpoints('confusion_matrix')
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Computes the confusion matrix from predictions and labels.
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 9d7d31df22..8ad71fe00c 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -106,7 +106,7 @@ def _summarize_eager(tensor, summarize=None):
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
-@tf_export("Assert")
+@tf_export("debugging.Assert", "Assert")
@tf_should_use.should_use_result
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index 69c0fcbbee..97b6f3bd9c 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -39,6 +39,7 @@ from tensorflow.python.ops import resource_variable_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_data_flow_ops import *
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
@@ -112,7 +113,8 @@ def _shape_common(s1, s2):
# pylint: disable=protected-access
-@tf_export("QueueBase")
+@tf_export("io.QueueBase", "QueueBase")
+@deprecation.deprecated_endpoints("QueueBase")
class QueueBase(object):
"""Base class for queue implementations.
@@ -604,7 +606,8 @@ def _shared_name(shared_name):
return shared_name
-@tf_export("RandomShuffleQueue")
+@tf_export("io.RandomShuffleQueue", "RandomShuffleQueue")
+@deprecation.deprecated_endpoints("RandomShuffleQueue")
class RandomShuffleQueue(QueueBase):
"""A queue implementation that dequeues elements in a random order.
@@ -746,7 +749,8 @@ class FIFOQueue(QueueBase):
super(FIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)
-@tf_export("PaddingFIFOQueue")
+@tf_export("io.PaddingFIFOQueue", "PaddingFIFOQueue")
+@deprecation.deprecated_endpoints("PaddingFIFOQueue")
class PaddingFIFOQueue(QueueBase):
"""A FIFOQueue that supports batching variable-sized tensors by padding.
@@ -820,7 +824,8 @@ class PaddingFIFOQueue(QueueBase):
super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)
-@tf_export("PriorityQueue")
+@tf_export("io.PriorityQueue", "PriorityQueue")
+@deprecation.deprecated_endpoints("PriorityQueue")
class PriorityQueue(QueueBase):
"""A queue implementation that dequeues elements in prioritized order.
@@ -1300,7 +1305,9 @@ class ConditionalAccumulator(ConditionalAccumulatorBase):
return out
-@tf_export("SparseConditionalAccumulator")
+@tf_export("sparse.SparseConditionalAccumulator",
+ "SparseConditionalAccumulator")
+@deprecation.deprecated_endpoints("SparseConditionalAccumulator")
class SparseConditionalAccumulator(ConditionalAccumulatorBase):
"""A conditional accumulator for aggregating sparse gradients.
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index fff3d9b930..65bb77b474 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -43,6 +43,7 @@ from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
+from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.tf_export import tf_export
@@ -341,6 +342,7 @@ class TruncatedNormal(Initializer):
@tf_export("initializers.uniform_unit_scaling",
"uniform_unit_scaling_initializer")
+@deprecation.deprecated_endpoints("uniform_unit_scaling_initializer")
class UniformUnitScaling(Initializer):
"""Initializer that generates tensors without scaling variance.
@@ -401,6 +403,7 @@ class UniformUnitScaling(Initializer):
@tf_export("keras.initializers.VarianceScaling",
"initializers.variance_scaling", "variance_scaling_initializer")
+@deprecation.deprecated_endpoints("variance_scaling_initializer")
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
@@ -494,6 +497,7 @@ class VarianceScaling(Initializer):
@tf_export("keras.initializers.Orthogonal", "initializers.orthogonal",
"orthogonal_initializer", "keras.initializers.orthogonal")
+@deprecation.deprecated_endpoints("orthogonal_initializer")
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
@@ -1149,6 +1153,7 @@ class GlorotUniform(VarianceScaling):
@tf_export("glorot_normal_initializer", "keras.initializers.glorot_normal",
"initializers.glorot_normal")
+@deprecation.deprecated_endpoints("glorot_normal_initializer")
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index f4a93560be..bf4354fa73 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -80,6 +80,7 @@ def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
@tf_export('cholesky_solve', 'linalg.cholesky_solve')
+@deprecation.deprecated_endpoints('cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
"""Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.
@@ -167,7 +168,8 @@ def eye(num_rows,
name=name)
-@tf_export('matrix_solve_ls', 'linalg.lstsq')
+@tf_export('linalg.lstsq', 'matrix_solve_ls')
+@deprecation.deprecated_endpoints('matrix_solve_ls')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
r"""Solves one or more linear least-squares problems.
@@ -220,7 +222,7 @@ def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
squares sense.
Raises:
- NotImplementedError: matrix_solve_ls is currently disabled for complex128
+ NotImplementedError: linalg.lstsq is currently disabled for complex128
and l2_regularizer != 0 due to poor accuracy.
"""
@@ -303,7 +305,8 @@ def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
matrix, rhs, l2_regularizer, fast=fast, name=name)
-@tf_export('self_adjoint_eig', 'linalg.eigh')
+@tf_export('linalg.eigh', 'self_adjoint_eig')
+@deprecation.deprecated_endpoints('self_adjoint_eig')
def self_adjoint_eig(tensor, name=None):
"""Computes the eigen decomposition of a batch of self-adjoint matrices.
@@ -325,12 +328,13 @@ def self_adjoint_eig(tensor, name=None):
return e, v
-@tf_export('self_adjoint_eigvals', 'linalg.eigvalsh')
+@tf_export('linalg.eigvalsh', 'self_adjoint_eigvals')
+@deprecation.deprecated_endpoints('self_adjoint_eigvals')
def self_adjoint_eigvals(tensor, name=None):
"""Computes the eigenvalues of one or more self-adjoint matrices.
Note: If your program backpropagates through this function, you should replace
- it with a call to tf.self_adjoint_eig (possibly ignoring the second output) to
+ it with a call to tf.linalg.eigh (possibly ignoring the second output) to
avoid computing the eigen decomposition twice. This is because the
eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
_SelfAdjointEigV2Grad in linalg_grad.py.
@@ -348,6 +352,7 @@ def self_adjoint_eigvals(tensor, name=None):
@tf_export('svd', 'linalg.svd')
+@deprecation.deprecated_endpoints('svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
r"""Computes the singular value decompositions of one or more matrices.
diff --git a/tensorflow/python/ops/lookup_ops.py b/tensorflow/python/ops/lookup_ops.py
index 5443699ddd..cffaa983d4 100644
--- a/tensorflow/python/ops/lookup_ops.py
+++ b/tensorflow/python/ops/lookup_ops.py
@@ -59,7 +59,7 @@ def initialize_all_tables(name="init_all_tables"):
return tables_initializer(name)
-@tf_export("tables_initializer")
+@tf_export("initializers.tables_initializer", "tables_initializer")
def tables_initializer(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
diff --git a/tensorflow/python/ops/manip_ops.py b/tensorflow/python/ops/manip_ops.py
index 6633565a64..d9d0728287 100644
--- a/tensorflow/python/ops/manip_ops.py
+++ b/tensorflow/python/ops/manip_ops.py
@@ -19,11 +19,13 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_manip_ops as _gen_manip_ops
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
-@tf_export('manip.roll')
+@tf_export('roll', 'manip.roll')
+@deprecation.deprecated_endpoints('manip.roll')
def roll(input, shift, axis): # pylint: disable=redefined-builtin
return _gen_manip_ops.roll(input, shift, axis)
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index f57abf6704..83b8b5a3a4 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -70,7 +70,7 @@ def _set_doc(doc):
# pylint: disable=redefined-builtin
-@tf_export("argmax")
+@tf_export("math.argmax", "argmax")
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
@@ -88,7 +88,7 @@ def argmax(input,
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
-@tf_export("argmin")
+@tf_export("math.argmin", "argmin")
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
@@ -111,7 +111,7 @@ def argmin(input,
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
-@tf_export("abs")
+@tf_export("math.abs", "abs")
def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
@@ -186,7 +186,7 @@ class DivideDelegateWithName(object):
return _div_python2(self.x, y, self.name)
-@tf_export("divide")
+@tf_export("math.divide", "divide")
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
@@ -198,7 +198,7 @@ def divide(x, y, name=None):
return x / y
-@tf_export("multiply")
+@tf_export("math.multiply", "multiply")
def multiply(x, y, name=None):
return gen_math_ops.mul(x, y, name)
@@ -218,7 +218,7 @@ _mul.__doc__ = (
gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
-@tf_export("subtract")
+@tf_export("math.subtract", "subtract")
def subtract(x, y, name=None):
return gen_math_ops.sub(x, y, name)
@@ -239,7 +239,7 @@ _sub.__doc__ = (
# pylint: disable=g-docstring-has-escape
-@tf_export("negative")
+@tf_export("math.negative", "negative")
def negative(x, name=None):
"""Computes numerical negative value element-wise.
@@ -288,7 +288,7 @@ def _neg(x, name=None):
# pylint: enable=g-docstring-has-escape
-@tf_export("sign")
+@tf_export("math.sign", "sign")
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
@@ -319,7 +319,7 @@ def sign(x, name=None):
return gen_math_ops.sign(x, name=name)
-@tf_export("square")
+@tf_export("math.square", "square")
def square(x, name=None):
r"""Computes square of x element-wise.
@@ -342,7 +342,7 @@ def square(x, name=None):
return gen_math_ops.square(x, name=name)
-@tf_export("sqrt")
+@tf_export("math.sqrt", "sqrt")
def sqrt(x, name=None):
r"""Computes square root of x element-wise.
@@ -365,7 +365,8 @@ def sqrt(x, name=None):
return gen_math_ops.sqrt(x, name=name)
-@tf_export("erf")
+@tf_export("math.erf", "erf")
+@deprecation.deprecated_endpoints("erf")
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
@@ -386,7 +387,7 @@ def erf(x, name=None):
return gen_math_ops.erf(x, name=name)
-@tf_export("scalar_mul")
+@tf_export("math.scalar_mul", "scalar_mul")
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
@@ -416,7 +417,7 @@ def scalar_mul(scalar, x):
raise ValueError("Only scalar multiply works, got shape %s" % shape)
-@tf_export("pow")
+@tf_export("math.pow", "pow")
def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
@@ -444,7 +445,7 @@ def pow(x, y, name=None): # pylint: disable=redefined-builtin
# pylint: disable=redefined-builtin,redefined-outer-name
-@tf_export("complex")
+@tf_export("dtypes.complex", "complex")
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
@@ -486,7 +487,8 @@ def complex(real, imag, name=None):
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
-@tf_export("real")
+@tf_export("math.real", "real")
+@deprecation.deprecated_endpoints("real")
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.
@@ -517,7 +519,8 @@ def real(input, name=None):
return input
-@tf_export("imag")
+@tf_export("math.imag", "imag")
+@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.
@@ -547,7 +550,8 @@ def imag(input, name=None):
return array_ops.zeros_like(input)
-@tf_export("angle")
+@tf_export("math.angle", "angle")
+@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.
@@ -586,7 +590,7 @@ def angle(input, name=None):
# pylint: enable=redefined-outer-name,redefined-builtin
-@tf_export("round")
+@tf_export("math.round", "round")
def round(x, name=None): # pylint: disable=redefined-builtin
"""Rounds the values of a tensor to the nearest integer, element-wise.
@@ -613,7 +617,7 @@ def round(x, name=None): # pylint: disable=redefined-builtin
return gen_math_ops.round(x, name=name)
-@tf_export("cast")
+@tf_export("dtypes.cast", "cast")
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
@@ -676,7 +680,7 @@ def cast(x, dtype, name=None):
return x
-@tf_export("saturate_cast")
+@tf_export("dtypes.saturate_cast", "saturate_cast")
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
@@ -995,7 +999,7 @@ def _div_python2(x, y, name=None):
return gen_math_ops.floor_div(x, y, name=name)
-@tf_export("truediv")
+@tf_export("math.truediv", "truediv")
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
@@ -1006,7 +1010,7 @@ def truediv(x, y, name=None):
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
- down, use `x // y` or `tf.floordiv`.
+ down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
@@ -1078,7 +1082,8 @@ mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
-@tf_export("floordiv")
+@tf_export("math.floordiv", "floordiv")
+@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
@@ -1151,7 +1156,8 @@ _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
-@tf_export("logical_xor")
+@tf_export("math.logical_xor", "logical_xor")
+@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
@@ -1277,7 +1283,7 @@ def _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):
return output
-@tf_export("reduce_sum")
+@tf_export("math.reduce_sum", "reduce_sum")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_sum(input_tensor,
@@ -1339,7 +1345,7 @@ def reduce_sum(input_tensor,
name=name))
-@tf_export("count_nonzero")
+@tf_export("math.count_nonzero", "count_nonzero")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def count_nonzero(input_tensor,
@@ -1417,7 +1423,7 @@ def count_nonzero(input_tensor,
dtype=dtype)
-@tf_export("reduce_mean")
+@tf_export("math.reduce_mean", "reduce_mean")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_mean(input_tensor,
@@ -1489,7 +1495,7 @@ def reduce_mean(input_tensor,
name=name))
-@tf_export("reduce_prod")
+@tf_export("math.reduce_prod", "reduce_prod")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_prod(input_tensor,
@@ -1539,7 +1545,7 @@ def reduce_prod(input_tensor,
name=name))
-@tf_export("reduce_min")
+@tf_export("math.reduce_min", "reduce_min")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_min(input_tensor,
@@ -1588,7 +1594,7 @@ def reduce_min(input_tensor,
name=name))
-@tf_export("reduce_max")
+@tf_export("math.reduce_max", "reduce_max")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_max(input_tensor,
@@ -1637,7 +1643,7 @@ def reduce_max(input_tensor,
name=name))
-@tf_export("reduce_all")
+@tf_export("math.reduce_all", "reduce_all")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_all(input_tensor,
@@ -1695,7 +1701,7 @@ def reduce_all(input_tensor,
name=name))
-@tf_export("reduce_any")
+@tf_export("math.reduce_any", "reduce_any")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_any(input_tensor,
@@ -1753,7 +1759,7 @@ def reduce_any(input_tensor,
name=name))
-@tf_export("reduce_logsumexp")
+@tf_export("math.reduce_logsumexp", "reduce_logsumexp")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_logsumexp(input_tensor,
@@ -1827,7 +1833,8 @@ def reduce_logsumexp(input_tensor,
return _may_reduce_to_scalar(keepdims, axis, reduction_indices, result)
-@tf_export("trace", "linalg.trace")
+@tf_export("linalg.trace", "trace")
+@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
@@ -1841,12 +1848,12 @@ def trace(x, name=None):
```python
x = tf.constant([[1, 2], [3, 4]])
- tf.trace(x) # 5
+ tf.linalg.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
- tf.trace(x) # 15
+ tf.linalg.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
@@ -1854,7 +1861,7 @@ def trace(x, name=None):
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
- tf.trace(x) # [15, -15]
+ tf.linalg.trace(x) # [15, -15]
```
Args:
@@ -1869,7 +1876,7 @@ def trace(x, name=None):
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
-@tf_export("matmul")
+@tf_export("linalg.matmul", "matmul")
def matmul(a,
b,
transpose_a=False,
@@ -2131,7 +2138,7 @@ def _as_indexed_slices_list(inputs, optimize=True):
return casted_outputs
-@tf_export("add_n")
+@tf_export("math.add_n", "add_n")
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
@@ -2166,14 +2173,15 @@ def add_n(inputs, name=None):
return gen_math_ops.add_n(inputs, name=name)
-@tf_export("accumulate_n")
+@tf_export("math.accumulate_n", "accumulate_n")
+@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
- `tf.accumulate_n` performs the same operation as `tf.add_n`, but does not
+ `tf.math.accumulate_n` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
storage is proportional to the output size rather than the inputs size.
@@ -2185,10 +2193,10 @@ def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
- tf.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
+ tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
- tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
+ tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7, 4],
# [6, 14]]
```
@@ -2252,7 +2260,7 @@ def _accumulate_n_grad(op, grad):
return [grad] * len(op.inputs)
-@tf_export("nn.sigmoid", "sigmoid")
+@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
@@ -2275,7 +2283,8 @@ def sigmoid(x, name=None):
return gen_math_ops.sigmoid(x, name=name)
-@tf_export("log_sigmoid")
+@tf_export("math.log_sigmoid", "log_sigmoid")
+@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
"""Computes log sigmoid of `x` element-wise.
@@ -2294,7 +2303,7 @@ def log_sigmoid(x, name=None):
return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)
-@tf_export("nn.tanh", "tanh")
+@tf_export("math.tanh", "nn.tanh", "tanh")
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
@@ -2315,7 +2324,8 @@ def tanh(x, name=None):
return gen_math_ops.tanh(x, name=name)
-@tf_export("bincount")
+@tf_export("math.bincount", "bincount")
+@deprecation.deprecated_endpoints("bincount")
def bincount(arr,
weights=None,
minlength=None,
@@ -2362,7 +2372,7 @@ def bincount(arr,
return gen_math_ops.bincount(arr, output_size, weights)
-@tf_export("cumsum")
+@tf_export("math.cumsum", "cumsum")
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
@@ -2414,7 +2424,8 @@ def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
x, axis, exclusive=exclusive, reverse=reverse, name=name)
-@tf_export("cumprod")
+@tf_export("math.cumprod", "cumprod")
+@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
@@ -2422,7 +2433,7 @@ def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
first element of the input is identical to the first element of the output:
```python
- tf.cumprod([a, b, c]) # [a, a * b, a * b * c]
+ tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
@@ -2430,21 +2441,21 @@ def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
instead:
```python
- tf.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
+ tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
- tf.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
+ tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
- tf.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
+ tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
```
Args:
@@ -2466,7 +2477,8 @@ def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
x, axis, exclusive=exclusive, reverse=reverse, name=name)
-@tf_export("conj")
+@tf_export("math.conj", "conj")
+@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
@@ -2480,7 +2492,7 @@ def conj(x, name=None):
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
+ tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
@@ -2566,7 +2578,8 @@ def _unsorted_segment_N(data, segment_ids, num_segments):
return gen_math_ops.maximum(N, 1)
-@tf_export("unsorted_segment_mean")
+@tf_export("math.unsorted_segment_mean", "unsorted_segment_mean")
+@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
r"""Computes the mean along segments of a tensor.
@@ -2608,7 +2621,8 @@ def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
return summed / N
-@tf_export("unsorted_segment_sqrt_n")
+@tf_export("math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n")
+@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
r"""Computes the sum along segments of a tensor divided by the sqrt(N).
@@ -2653,7 +2667,8 @@ def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
return summed / gen_math_ops.sqrt(N)
-@tf_export("sparse_segment_sum")
+@tf_export("sparse.segment_sum", "sparse_segment_sum")
+@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data, indices, segment_ids, name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor.
@@ -2674,16 +2689,16 @@ def sparse_segment_sum(data, indices, segment_ids, name=None,
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+ tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
# Select two rows, two segment.
- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+ tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# With missing segment ids.
- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
+ tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
@@ -2691,7 +2706,7 @@ def sparse_segment_sum(data, indices, segment_ids, name=None,
# [ 0 0 0 0]]
# Select all rows, two segments.
- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+ tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
@@ -2726,7 +2741,8 @@ def sparse_segment_sum(data, indices, segment_ids, name=None,
data=data, indices=indices, segment_ids=segment_ids, name=name)
-@tf_export("sparse_segment_mean")
+@tf_export("sparse.segment_mean", "sparse_segment_mean")
+@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
indices,
segment_ids,
@@ -2771,7 +2787,8 @@ def sparse_segment_mean(data,
data=data, indices=indices, segment_ids=segment_ids, name=name)
-@tf_export("sparse_segment_sqrt_n")
+@tf_export("sparse.segment_sqrt_n", "sparse_segment_sqrt_n")
+@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
indices,
segment_ids,
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index 2a1919e66f..453848fc00 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -328,7 +328,7 @@ def swish(features):
return features * math_ops.sigmoid(features)
-@tf_export("nn.l2_normalize")
+@tf_export("math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize")
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
"""Normalizes along dimension `axis` using an L2 norm.
@@ -360,7 +360,7 @@ def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
return math_ops.multiply(x, x_inv_norm, name=name)
-@tf_export("nn.zero_fraction")
+@tf_export("math.zero_fraction", "nn.zero_fraction")
def zero_fraction(value, name=None):
"""Returns the fraction of zeros in `value`.
@@ -689,7 +689,7 @@ def moments(
# Compute true mean while keeping the dims for proper broadcasting.
mean = math_ops.reduce_mean(y, axes, keepdims=True, name="mean")
# sample variance, not unbiased variance
- # Note: stop_gradient does not change the gradient that gets
+ # Note: stop_gradient does not change the gradient that gets
# backpropagated to the mean from the variance calculation,
# because that gradient is zero
variance = math_ops.reduce_mean(
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 9ef177e97b..fd71e7cc39 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -1692,7 +1692,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):
return output
-@tf_export("nn.softmax")
+@tf_export("nn.softmax", "math.softmax")
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
@@ -1722,7 +1722,7 @@ def softmax(logits, axis=None, name=None, dim=None):
return _softmax(logits, gen_nn_ops.softmax, axis, name)
-@tf_export("nn.log_softmax")
+@tf_export("nn.log_softmax", "math.log_softmax")
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
@@ -2329,7 +2329,7 @@ def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: di
return ret
-@tf_export("nn.top_k")
+@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
@@ -2644,7 +2644,7 @@ def erosion2d(value, kernel, strides, rates, padding, name=None):
name=name))
-@tf_export("nn.in_top_k")
+@tf_export("math.in_top_k", "nn.in_top_k")
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
diff --git a/tensorflow/python/ops/numerics.py b/tensorflow/python/ops/numerics.py
index 8fcbd7d834..002e87b411 100644
--- a/tensorflow/python/ops/numerics.py
+++ b/tensorflow/python/ops/numerics.py
@@ -24,10 +24,12 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
-@tf_export("verify_tensor_all_finite")
+@tf_export("debugging.assert_all_finite", "verify_tensor_all_finite")
+@deprecation.deprecated_endpoints("verify_tensor_all_finite")
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
diff --git a/tensorflow/python/ops/parsing_ops.py b/tensorflow/python/ops/parsing_ops.py
index b3e03a0135..ff50fe0d09 100644
--- a/tensorflow/python/ops/parsing_ops.py
+++ b/tensorflow/python/ops/parsing_ops.py
@@ -36,6 +36,7 @@ from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.platform import tf_logging
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@@ -45,7 +46,7 @@ ops.NotDifferentiable("SerializeTensor")
ops.NotDifferentiable("StringToNumber")
-@tf_export("VarLenFeature")
+@tf_export("io.VarLenFeature", "VarLenFeature")
class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
"""Configuration for parsing a variable-length input feature.
@@ -55,7 +56,7 @@ class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
pass
-@tf_export("SparseFeature")
+@tf_export("io.SparseFeature", "SparseFeature")
class SparseFeature(
collections.namedtuple(
"SparseFeature",
@@ -130,7 +131,7 @@ class SparseFeature(
cls, index_key, value_key, dtype, size, already_sorted)
-@tf_export("FixedLenFeature")
+@tf_export("io.FixedLenFeature", "FixedLenFeature")
class FixedLenFeature(collections.namedtuple(
"FixedLenFeature", ["shape", "dtype", "default_value"])):
"""Configuration for parsing a fixed-length input feature.
@@ -150,7 +151,7 @@ class FixedLenFeature(collections.namedtuple(
cls, shape, dtype, default_value)
-@tf_export("FixedLenSequenceFeature")
+@tf_export("io.FixedLenSequenceFeature", "FixedLenSequenceFeature")
class FixedLenSequenceFeature(collections.namedtuple(
"FixedLenSequenceFeature",
["shape", "dtype", "allow_missing", "default_value"])):
@@ -360,7 +361,7 @@ def _prepend_none_dimension(features):
return features
-@tf_export("parse_example")
+@tf_export("io.parse_example", "parse_example")
def parse_example(serialized, features, name=None, example_names=None):
# pylint: disable=line-too-long
"""Parses `Example` protos into a `dict` of tensors.
@@ -761,7 +762,7 @@ def _process_raw_parameters(names, dense_defaults, sparse_keys, sparse_types,
dense_shapes_as_proto, dense_shapes)
-@tf_export("parse_single_example")
+@tf_export("io.parse_single_example", "parse_single_example")
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
@@ -1244,7 +1245,7 @@ def _parse_sequence_example_raw(serialized,
# TODO(sundberg): rewrite this method to call the batch version, which is more
# efficient especially for large inputs.
-@tf_export("parse_single_sequence_example")
+@tf_export("io.parse_single_sequence_example", "parse_single_sequence_example")
def parse_single_sequence_example(
serialized, context_features=None, sequence_features=None,
example_name=None, name=None):
@@ -1564,7 +1565,8 @@ def _parse_single_sequence_example_raw(serialized,
# Swap `name` and `na_value` for backward compatibility.
-@tf_export("decode_csv")
+@tf_export("io.decode_csv", "decode_csv")
+@deprecation.deprecated_endpoints("decode_csv")
def decode_csv(records,
record_defaults,
field_delim=",",
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index 4baf506385..c2eb9dfc5d 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -29,6 +29,7 @@ from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
@@ -43,7 +44,7 @@ def _ShapeTensor(shape):
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
-@tf_export("random_normal")
+@tf_export("random.normal", "random_normal")
def random_normal(shape,
mean=0.0,
stddev=1.0,
@@ -136,7 +137,7 @@ def parameterized_truncated_normal(shape,
return rnd
-@tf_export("truncated_normal")
+@tf_export("random.truncated_normal", "truncated_normal")
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
@@ -181,7 +182,7 @@ ops.NotDifferentiable("ParameterizedTruncatedNormal")
ops.NotDifferentiable("TruncatedNormal")
-@tf_export("random_uniform")
+@tf_export("random.uniform", "random_uniform")
def random_uniform(shape,
minval=0,
maxval=None,
@@ -246,7 +247,7 @@ def random_uniform(shape,
ops.NotDifferentiable("RandomUniform")
-@tf_export("random_shuffle")
+@tf_export("random.shuffle", "random_shuffle")
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
@@ -277,7 +278,7 @@ def random_shuffle(value, seed=None, name=None):
value, seed=seed1, seed2=seed2, name=name)
-@tf_export("random_crop")
+@tf_export("image.random_crop", "random_crop")
def random_crop(value, size, seed=None, name=None):
"""Randomly crops a tensor to a given size.
@@ -320,7 +321,7 @@ def random_crop(value, size, seed=None, name=None):
return array_ops.slice(value, offset, size, name=name)
-@tf_export("multinomial")
+@tf_export("random.multinomial", "multinomial")
def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):
"""Draws samples from a multinomial distribution.
@@ -356,7 +357,8 @@ def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):
ops.NotDifferentiable("Multinomial")
-@tf_export("random_gamma")
+@tf_export("random.gamma", "random_gamma")
+@deprecation.deprecated_endpoints("random_gamma")
def random_gamma(shape,
alpha,
beta=None,
@@ -439,7 +441,8 @@ def random_gamma(shape,
shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)
-@tf_export("random_poisson")
+@tf_export("random.poisson", "random_poisson")
+@deprecation.deprecated_endpoints("random_poisson")
def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
"""Draws `shape` samples from each of the given Poisson distribution(s).
diff --git a/tensorflow/python/ops/sparse_ops.py b/tensorflow/python/ops/sparse_ops.py
index 400a42a3c0..7e3dbdbad4 100644
--- a/tensorflow/python/ops/sparse_ops.py
+++ b/tensorflow/python/ops/sparse_ops.py
@@ -185,7 +185,8 @@ def sparse_eye(num_rows,
# pylint: disable=protected-access
-@tf_export("sparse_concat")
+@tf_export("sparse.concat", "sparse_concat")
+@deprecation.deprecated_endpoints("sparse_concat")
@deprecation.deprecated_args(
None, "concat_dim is deprecated, use axis instead", "concat_dim")
def sparse_concat(axis,
@@ -317,7 +318,8 @@ def sparse_concat(axis,
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
-@tf_export("sparse_add")
+@tf_export("sparse.add", "sparse_add")
+@deprecation.deprecated_endpoints("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
@@ -557,7 +559,8 @@ def sparse_dense_cwise_add(sp_t, dense_t):
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
-@tf_export("sparse_reorder")
+@tf_export("sparse.reorder", "sparse_reorder")
+@deprecation.deprecated_endpoints("sparse_reorder")
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
@@ -607,7 +610,8 @@ def sparse_reorder(sp_input, name=None):
return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)
-@tf_export("sparse_reshape")
+@tf_export("sparse.reshape", "sparse_reshape")
+@deprecation.deprecated_endpoints("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
@@ -700,7 +704,8 @@ class KeywordRequired(object):
return "KeywordRequired()"
-@tf_export("sparse_split")
+@tf_export("sparse.split", "sparse_split")
+@deprecation.deprecated_endpoints("sparse_split")
@deprecation.deprecated_args(
None, "split_dim is deprecated, use axis instead", "split_dim")
def sparse_split(keyword_required=KeywordRequired(),
@@ -773,7 +778,8 @@ def sparse_split(keyword_required=KeywordRequired(),
return sparse_tensors
-@tf_export("sparse_slice")
+@tf_export("sparse.slice", "sparse_slice")
+@deprecation.deprecated_endpoints("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
"""Slice a `SparseTensor` based on the `start` and `size.
@@ -785,11 +791,11 @@ def sparse_slice(sp_input, start, size, name=None):
Graphically the output tensors are:
- sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
+ sparse.slice([0, 0], [2, 4]) = shape = [2, 4]
[ a ]
[b c ]
- sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
+ sparse.slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e ]
[ ]
@@ -823,6 +829,9 @@ def sparse_slice(sp_input, start, size, name=None):
@tf_export("sparse_to_dense")
+@deprecation.deprecated(
+ None,
+ "Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.")
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
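
The deprecation message above names the replacement path; a minimal sketch of both sides follows (the indices and values are illustrative only).

```python
import tensorflow as tf

# Deprecated: the positional indices/shape/values entry point.
dense_old = tf.sparse_to_dense(sparse_indices=[[0, 0], [1, 2]],
                               output_shape=[2, 3],
                               sparse_values=[6, 7])

# Recommended: build a tf.sparse.SparseTensor, then densify it.
sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[6, 7],
                            dense_shape=[2, 3])
dense_new = tf.sparse.to_dense(sp)

with tf.Session() as sess:
    print(sess.run(dense_new))  # [[6 0 0]
                                #  [0 0 7]]
```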
@@ -878,7 +887,8 @@ def sparse_to_dense(sparse_indices,
name=name)
-@tf_export("sparse_reduce_max")
+@tf_export("sparse.reduce_max", "sparse_reduce_max")
+@deprecation.deprecated_endpoints("sparse_reduce_max")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_max(sp_input, axis=None, keepdims=None,
@@ -912,16 +922,16 @@ def sparse_reduce_max(sp_input, axis=None, keepdims=None,
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
  # where ? is implicitly zero.
- tf.sparse_reduce_max(x) ==> 3
- tf.sparse_reduce_max(x, 0) ==> [1, 3, 2]
- tf.sparse_reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
- tf.sparse_reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
- tf.sparse_reduce_max(x, [0, 1]) ==> 3
+ tf.sparse.reduce_max(x) ==> 3
+ tf.sparse.reduce_max(x, 0) ==> [1, 3, 2]
+ tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
+ tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
+ tf.sparse.reduce_max(x, [0, 1]) ==> 3
# 'y' represents [[-7, ?]
# [ 4, 3]
  #                 [ ?, ?]]
- tf.sparse_reduce_max(x, 1) ==> [-7, 4, 0]
+  tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0]
```
Args:
@@ -945,7 +955,8 @@ def sparse_reduce_max(sp_input, axis=None, keepdims=None,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)
-@tf_export("sparse_reduce_max_sparse")
+@tf_export("sparse.reduce_max_sparse", "sparse_reduce_max_sparse")
+@deprecation.deprecated_endpoints("sparse_reduce_max_sparse")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_max_sparse(sp_input,
@@ -995,7 +1006,8 @@ def sparse_reduce_max_sparse(sp_input,
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
-@tf_export("sparse_reduce_sum")
+@tf_export("sparse.reduce_sum", "sparse_reduce_sum")
+@deprecation.deprecated_endpoints("sparse_reduce_sum")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
@@ -1021,11 +1033,11 @@ def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
  # where ? is implicitly zero.
- tf.sparse_reduce_sum(x) ==> 3
- tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
- tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
- tf.sparse_reduce_sum(x, 1, keepdims=True) ==> [[2], [1]]
- tf.sparse_reduce_sum(x, [0, 1]) ==> 3
+ tf.sparse.reduce_sum(x) ==> 3
+ tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1]
+ tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
+ tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]]
+ tf.sparse.reduce_sum(x, [0, 1]) ==> 3
```
Args:
@@ -1049,7 +1061,8 @@ def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)
-@tf_export("sparse_reduce_sum_sparse")
+@tf_export("sparse.reduce_sum_sparse", "sparse_reduce_sum_sparse")
+@deprecation.deprecated_endpoints("sparse_reduce_sum_sparse")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_sum_sparse(sp_input,
@@ -1099,7 +1112,8 @@ def sparse_reduce_sum_sparse(sp_input,
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
-@tf_export("sparse_tensor_to_dense")
+@tf_export("sparse.to_dense", "sparse_tensor_to_dense")
+@deprecation.deprecated_endpoints("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
@@ -1151,7 +1165,8 @@ def sparse_tensor_to_dense(sp_input,
name=name)
-@tf_export("sparse_to_indicator")
+@tf_export("sparse.to_indicator", "sparse_to_indicator")
+@deprecation.deprecated_endpoints("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
@@ -1214,7 +1229,8 @@ def sparse_to_indicator(sp_input, vocab_size, name=None):
sp_new, default_value=False, validate_indices=False, name=name)
-@tf_export("sparse_merge")
+@tf_export("sparse.merge", "sparse_merge")
+@deprecation.deprecated_endpoints("sparse_merge")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
@@ -1358,7 +1374,8 @@ def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
sorted_result.indices, sorted_result.values, new_shape)
-@tf_export("sparse_retain")
+@tf_export("sparse.retain", "sparse_retain")
+@deprecation.deprecated_endpoints("sparse_retain")
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
@@ -1402,7 +1419,8 @@ def sparse_retain(sp_input, to_retain):
array_ops.identity(sp_input.dense_shape))
-@tf_export("sparse_reset_shape")
+@tf_export("sparse.reset_shape", "sparse_reset_shape")
+@deprecation.deprecated_endpoints("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
@@ -1503,7 +1521,8 @@ def sparse_reset_shape(sp_input, new_shape=None):
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
-@tf_export("sparse_fill_empty_rows")
+@tf_export("sparse.fill_empty_rows", "sparse_fill_empty_rows")
+@deprecation.deprecated_endpoints("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
@@ -1567,7 +1586,8 @@ def sparse_fill_empty_rows(sp_input, default_value, name=None):
dense_shape=sp_input.dense_shape), empty_row_indicator)
-@tf_export("serialize_sparse")
+@tf_export("io.serialize_sparse", "serialize_sparse")
+@deprecation.deprecated_endpoints("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.
@@ -1593,7 +1613,8 @@ def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
out_type=out_type)
-@tf_export("serialize_many_sparse")
+@tf_export("io.serialize_many_sparse", "serialize_many_sparse")
+@deprecation.deprecated_endpoints("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.
@@ -1694,7 +1715,8 @@ def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
-@tf_export("deserialize_many_sparse")
+@tf_export("io.deserialize_many_sparse", "deserialize_many_sparse")
+@deprecation.deprecated_endpoints("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
@@ -1712,7 +1734,7 @@ def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
- step run `sparse_reorder` to restore index ordering.
+ step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
@@ -1764,7 +1786,8 @@ def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
-@tf_export("sparse_tensor_dense_matmul")
+@tf_export("sparse.matmul", "sparse_tensor_dense_matmul")
+@deprecation.deprecated_endpoints("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
@@ -1777,7 +1800,7 @@ def sparse_tensor_dense_matmul(sp_a,
following input format is recommended for optimal behavior:
* If `adjoint_a == false`: `A` should be sorted in lexicographically
- increasing order. Use `sparse_reorder` if you're not sure.
+ increasing order. Use `sparse.reorder` if you're not sure.
* If `adjoint_a == true`: `A` should be sorted in order of increasing
dimension 1 (i.e., "column major" order instead of "row major" order).
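
As the docstring above recommends, run `sparse.reorder` before the (renamed) matmul whenever index order is uncertain; here is a minimal sketch, with values chosen for illustration.

```python
import tensorflow as tf

# Indices deliberately out of canonical row-major order.
sp_a = tf.sparse.SparseTensor(indices=[[1, 0], [0, 1]],
                              values=[3.0, 2.0], dense_shape=[2, 2])
b = tf.constant([[1.0, 0.0], [0.0, 1.0]])

sp_a = tf.sparse.reorder(sp_a)       # restore lexicographic ordering
product = tf.sparse.matmul(sp_a, b)  # new name for sparse_tensor_dense_matmul

with tf.Session() as sess:
    print(sess.run(product))  # [[0. 2.]
                              #  [3. 0.]]
```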
@@ -1981,7 +2004,8 @@ def sparse_tensor_dense_matmul(sp_a,
adjoint_b=adjoint_b)
-@tf_export("sparse_softmax")
+@tf_export("sparse.softmax", "sparse_softmax")
+@deprecation.deprecated_endpoints("sparse_softmax")
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
@@ -2036,7 +2060,8 @@ def sparse_softmax(sp_input, name=None):
sp_input.dense_shape)
-@tf_export("sparse_maximum")
+@tf_export("sparse.maximum", "sparse_maximum")
+@deprecation.deprecated_endpoints("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
@@ -2073,7 +2098,8 @@ def sparse_maximum(sp_a, sp_b, name=None):
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
-@tf_export("sparse_minimum")
+@tf_export("sparse.minimum", "sparse_minimum")
+@deprecation.deprecated_endpoints("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
@@ -2110,7 +2136,8 @@ def sparse_minimum(sp_a, sp_b, name=None):
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
-@tf_export("sparse_transpose")
+@tf_export("sparse.transpose", "sparse_transpose")
+@deprecation.deprecated_endpoints("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
@@ -2259,7 +2286,7 @@ def _take_many_sparse_from_tensors_map(sparse_map_op,
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
- step run `sparse_reorder` to restore index ordering.
+ step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
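
The sparse reductions whose docstrings are rewritten above migrate the same way; a small end-to-end sketch using the docstrings' implicit-zero convention (values illustrative):

```python
import tensorflow as tf

# Represents [[1, ?, 1],
#             [?, 1, ?]] with ? implicitly zero.
x = tf.sparse.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                           values=[1, 1, 1], dense_shape=[2, 3])

total = tf.sparse.reduce_sum(x)            # was tf.sparse_reduce_sum
col_max = tf.sparse.reduce_max(x, axis=0)  # was tf.sparse_reduce_max

with tf.Session() as sess:
    print(sess.run(total))    # 3
    print(sess.run(col_max))  # [1 1 1]
```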
diff --git a/tensorflow/python/ops/special_math_ops.py b/tensorflow/python/ops/special_math_ops.py
index 9a10abfcf7..cfab943896 100644
--- a/tensorflow/python/ops/special_math_ops.py
+++ b/tensorflow/python/ops/special_math_ops.py
@@ -29,11 +29,13 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# TODO(b/27419586) Change docstring for required dtype of x once int allowed
-@tf_export('lbeta')
+@tf_export('math.lbeta', 'lbeta')
+@deprecation.deprecated_endpoints('lbeta')
def lbeta(x, name=None):
r"""Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension.
diff --git a/tensorflow/python/ops/string_ops.py b/tensorflow/python/ops/string_ops.py
index 046a48d192..e83c08f643 100644
--- a/tensorflow/python/ops/string_ops.py
+++ b/tensorflow/python/ops/string_ops.py
@@ -310,8 +310,9 @@ def _reduce_join_reduction_dims(x, axis, reduction_indices):
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
-@tf_export("reduce_join")
-def reduce_join(inputs, axis=None,
+@tf_export("strings.reduce_join", "reduce_join")
+@deprecation.deprecated_endpoints("reduce_join")
+def reduce_join(inputs, axis=None, # pylint: disable=missing-docstring
keep_dims=False,
separator="",
name=None,
@@ -329,6 +330,8 @@ def reduce_join(inputs, axis=None,
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
+reduce_join.__doc__ = reduce_join.__doc__.replace("tf.reduce_join(",
+ "tf.strings.reduce_join(")
# This wrapper provides backwards compatibility for code that predates the
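
Finally, the `strings.reduce_join` move, including the docstring rewrite just above, can be sanity-checked with a sketch like this (the input strings are illustrative):

```python
import tensorflow as tf

words = tf.constant([["hello", "world"], ["tf", "strings"]])
# New endpoint; tf.reduce_join remains as a deprecated alias, and its
# docstring is patched above to show the namespaced name.
joined = tf.strings.reduce_join(words, axis=1, separator=" ")

with tf.Session() as sess:
    print(sess.run(joined))  # [b'hello world' b'tf strings']
```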