Diffstat (limited to 'tensorflow/python/ops/losses/losses_impl.py')
-rw-r--r--  tensorflow/python/ops/losses/losses_impl.py | 25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/tensorflow/python/ops/losses/losses_impl.py b/tensorflow/python/ops/losses/losses_impl.py
index c86cc92321..a39417139e 100644
--- a/tensorflow/python/ops/losses/losses_impl.py
+++ b/tensorflow/python/ops/losses/losses_impl.py
@@ -156,8 +156,10 @@ def _num_present(losses, weights, per_batch=False):
present = weights_broadcast_ops.broadcast_weights(present, losses)
if per_batch:
return math_ops.reduce_sum(
- present, axis=math_ops.range(1, array_ops.rank(present)),
- keep_dims=True, name=scope)
+ present,
+ axis=math_ops.range(1, array_ops.rank(present)),
+ keepdims=True,
+ name=scope)
return math_ops.reduce_sum(present, name=scope)
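This hunk swaps the deprecated `keep_dims` argument for `keepdims` in `math_ops.reduce_sum`. A minimal sketch of the same per-batch reduction at the public API level, assuming a TF 1.x release where the renamed argument is accepted (the tensors are made up):

import tensorflow as tf

weights = tf.constant([[1.0, 0.0], [1.0, 1.0]])
# Reduce over every axis except the batch axis, keeping the reduced
# dimensions so the result still broadcasts against the losses.
per_batch = tf.reduce_sum(
    weights,
    axis=tf.range(1, tf.rank(weights)),
    keepdims=True)  # formerly keep_dims=True
with tf.Session() as sess:
    print(sess.run(per_batch))  # [[1.], [2.]]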
@@ -324,7 +326,7 @@ def cosine_distance(
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
- losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keep_dims=True)
+ losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
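For reference, a hedged usage sketch of the function this hunk touches, assuming the TF 1.x `tf.losses.cosine_distance` endpoint with its `axis` argument (inputs are illustrative and already unit-normalized, as the function expects):

import tensorflow as tf

labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])       # unit rows
predictions = tf.constant([[1.0, 0.0], [1.0, 0.0]])  # unit rows
# Per-sample losses are 1 - sum(labels * predictions) along `axis`.
loss = tf.losses.cosine_distance(labels, predictions, axis=1)
with tf.Session() as sess:
    print(sess.run(loss))  # (0.0 + 1.0) / 2 = 0.5 under the default reduction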
@@ -390,7 +392,7 @@ def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
- [batch_size], then the total loss for each sample of the batch is rescaled
+ `[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
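The `[batch_size]` weighting this docstring describes can be sketched as below; the numbers are invented and assume the TF 1.x `tf.losses.huber_loss` endpoint:

import tensorflow as tf

labels = tf.constant([0.0, 0.0, 0.0])
predictions = tf.constant([0.5, 2.0, 4.0])
# One weight per sample: the third sample is masked out entirely.
weights = tf.constant([1.0, 1.0, 0.0])
loss = tf.losses.huber_loss(labels, predictions, weights=weights, delta=1.0)
with tf.Session() as sess:
    # (0.5 * 0.5**2 + (2.0 - 0.5 * 1.0)) / 2 nonzero weights = 0.8125
    print(sess.run(loss))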
@@ -452,7 +454,7 @@ def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
- [batch_size], then the total loss for each sample of the batch is rescaled
+ `[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
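The `epsilon` argument of `log_loss` exists to keep the logs finite; a minimal sketch of its effect, with made-up inputs, assuming the TF 1.x endpoint:

import tensorflow as tf

labels = tf.constant([1.0, 0.0, 1.0])
predictions = tf.constant([0.9, 0.1, 0.0])  # a raw log(0.0) would be -inf
# Internally the loss is -y*log(p + epsilon) - (1-y)*log(1 - p + epsilon).
loss = tf.losses.log_loss(labels, predictions, epsilon=1e-7)
with tf.Session() as sess:
    print(sess.run(loss))  # finite, thanks to the epsilon clamp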
@@ -519,7 +521,7 @@ def mean_pairwise_squared_error(
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
- [batch_size], then the total loss for each sample of the batch is rescaled
+ `[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
@@ -559,15 +561,16 @@ def mean_pairwise_squared_error(
reduction_indices = math_ops.range(1, array_ops.rank(diffs))
sum_squares_diff_per_batch = math_ops.reduce_sum(
- math_ops.square(diffs), reduction_indices=reduction_indices,
- keep_dims=True)
+ math_ops.square(diffs),
+ reduction_indices=reduction_indices,
+ keepdims=True)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch - 1)
sum_diff = math_ops.reduce_sum(
- diffs, reduction_indices=reduction_indices, keep_dims=True)
+ diffs, reduction_indices=reduction_indices, keepdims=True)
term2 = 2.0 * _safe_div(
math_ops.square(sum_diff),
math_ops.multiply(num_present_per_batch, num_present_per_batch - 1))
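The term1 - term2 form above is an algebraic shortcut: for per-element diffs d with n present elements, the mean over unordered pairs of (d_i - d_j)^2 equals 2*sum(d^2)/(n-1) - 2*sum(d)^2/(n*(n-1)). A small NumPy check of that identity (the values are arbitrary):

import numpy as np

d = np.array([0.3, -0.1, 0.4])  # diffs = predictions - labels for one sample
n = d.size

term1 = 2.0 * np.sum(d ** 2) / (n - 1)
term2 = 2.0 * np.sum(d) ** 2 / (n * (n - 1))

# Brute-force mean over the n*(n-1)/2 unordered pairs.
pairs = [(d[i] - d[j]) ** 2 for i in range(n) for j in range(i + 1, n)]
assert np.isclose(term1 - term2, np.mean(pairs))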
@@ -593,7 +596,7 @@ def mean_squared_error(
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
- [batch_size], then the total loss for each sample of the batch is rescaled
+ `[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
@@ -812,7 +815,7 @@ def sparse_softmax_cross_entropy(
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
- tensor of shape [`batch_size`], then the loss weights apply to each
+ tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
Args:
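And the per-sample weighting for `sparse_softmax_cross_entropy`, sketched with invented logits, assuming the TF 1.x endpoint (note the labels are integer class ids, not one-hot):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.3], [0.1, 3.0, 0.2]])
labels = tf.constant([0, 1])       # shape [batch_size], integer class ids
weights = tf.constant([1.0, 0.5])  # one loss weight per sample
loss = tf.losses.sparse_softmax_cross_entropy(labels, logits, weights=weights)
with tf.Session() as sess:
    print(sess.run(loss))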