Diffstat (limited to 'tensorflow/python/ops/nn_impl.py')
 tensorflow/python/ops/nn_impl.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index 431ea1186a..da037a7983 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -32,6 +32,8 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import nn_ops
 from tensorflow.python.ops import sparse_ops
 from tensorflow.python.ops import variables
+from tensorflow.python.util.deprecation import deprecated_args
+from tensorflow.python.util.deprecation import deprecated_argument_lookup
 
 
 def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
@@ -313,19 +315,20 @@ def swish(features):
   return features * math_ops.sigmoid(features)
 
 
-def l2_normalize(x, dim, epsilon=1e-12, name=None):
-  """Normalizes along dimension `dim` using an L2 norm.
+@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
+def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
+  """Normalizes along dimension `axis` using an L2 norm.
 
-  For a 1-D tensor with `dim = 0`, computes
+  For a 1-D tensor with `axis = 0`, computes
 
       output = x / sqrt(max(sum(x**2), epsilon))
 
   For `x` with more dimensions, independently normalizes each 1-D slice along
-  dimension `dim`.
+  dimension `axis`.
 
   Args:
     x: A `Tensor`.
-    dim: Dimension along which to normalize. A scalar or a vector of
+    axis: Dimension along which to normalize. A scalar or a vector of
       integers.
     epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
       divisor if `norm < sqrt(epsilon)`.
@@ -335,8 +338,9 @@ def l2_normalize(x, dim, epsilon=1e-12, name=None):
     A `Tensor` with the same shape as `x`.
   """
   with ops.name_scope(name, "l2_normalize", [x]) as name:
+    axis = deprecated_argument_lookup("axis", axis, "dim", dim)
     x = ops.convert_to_tensor(x, name="x")
-    square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
+    square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keep_dims=True)
     x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
     return math_ops.multiply(x, x_inv_norm, name=name)
 
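
For context, a minimal usage sketch of the signature after this change, assuming this revision and a TF 1.x graph session (the sample tensor and values are illustrative, not from the commit): `axis` is the preferred keyword, while `dim` is still accepted, resolved through `deprecated_argument_lookup`, and only emits a deprecation warning via the `@deprecated_args` decorator.

import numpy as np
import tensorflow as tf

# Rows of x have L2 norms 5 and 10, so both rows normalize to [0.6, 0.8].
x = tf.constant([[3.0, 4.0],
                 [6.0, 8.0]])

# Preferred spelling after this change.
normalized = tf.nn.l2_normalize(x, axis=1)

# Still accepted, but routed through deprecated_argument_lookup and
# reported as deprecated by the @deprecated_args decorator.
legacy = tf.nn.l2_normalize(x, dim=1)

with tf.Session() as sess:
    new_vals, old_vals = sess.run([normalized, legacy])
    print(new_vals)                         # ~[[0.6 0.8] [0.6 0.8]]
    print(np.allclose(new_vals, old_vals))  # True: both spellings agree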