-rw-r--r--  tensorflow/python/ops/gradients.py       | 12 ++++++------
-rw-r--r--  tensorflow/python/ops/gradients_test.py  | 12 ++++++++++++
2 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/tensorflow/python/ops/gradients.py b/tensorflow/python/ops/gradients.py
index fcf3af8183..f07454028c 100644
--- a/tensorflow/python/ops/gradients.py
+++ b/tensorflow/python/ops/gradients.py
@@ -638,13 +638,13 @@ def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
- if all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
+ if len(out_grad) < 2:
+ used = "nop"
+ out_grads[i] = out_grad[0]
+ elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
- if len(out_grad) < 2:
- used = "nop"
- out_grads[i] = out_grad[0]
- elif (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
- and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
+ if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
+ and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
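
Illustrative sketch (not part of the patch): the hunk above hoists the singleton check ahead of the Tensor-only isinstance test, so a lone gradient is passed through unchanged whether it is a Tensor or an IndexedSlices. The toy Tensor/IndexedSlices classes and the aggregate_one helper below are hypothetical stand-ins used only to show that control flow; they are not TensorFlow APIs.

class Tensor(object):
  pass


class IndexedSlices(object):
  pass


def aggregate_one(out_grad):
  # Mirrors the patched fall-through in _AggregatedGrads: a single gradient
  # needs no aggregation, so it is returned as-is before any type check.
  if len(out_grad) < 2:
    return out_grad[0]
  elif all(isinstance(g, Tensor) for g in out_grad if g is not None):
    raise NotImplementedError("dense (add_n / accumulate_n) path not sketched")
  else:
    raise NotImplementedError("sparse IndexedSlices concat path not sketched")


# Before the patch the isinstance(ops.Tensor) check ran first, so a singleton
# IndexedSlices fell into the sparse-aggregation branch; now it is simply
# passed through.
assert isinstance(aggregate_one([IndexedSlices()]), IndexedSlices)
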
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index 492d60931f..77711569b3 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -260,6 +260,18 @@ class GradientsTest(test_util.TensorFlowTestCase):
       grads = gradients.gradients(z, [c])
       self.assertTrue(isinstance(grads[0], ops.Tensor))
 
+  def testSingletonIndexedSlices(self):
+    with ops.Graph().as_default():
+      x = tf.placeholder(tf.float32)
+      y = tf.identity(x)
+      dy = tf.IndexedSlices(tf.placeholder(tf.float32),
+                            tf.placeholder(tf.int32))
+      dx, = gradients.gradients(y, x, grad_ys=dy)
+      # The gradient of tf.identity should pass the value through unchanged.
+      # A previous version of the code did this only for tf.Tensor, not
+      # tf.IndexedSlices.
+      self.assertEqual(dx, dy)
+
 
 class FunctionGradientsTest(test_util.TensorFlowTestCase):
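
For context only (not part of the patch, and assuming the TF 1.x graph-mode API that this test file targets): IndexedSlices gradients typically arise when differentiating through sparse lookups such as tf.gather, so a graph in which a single such gradient flows back to a variable exercises exactly the singleton path fixed above.

import tensorflow as tf

params = tf.Variable(tf.ones([10, 4]))
indices = tf.placeholder(tf.int32, shape=[None])
rows = tf.gather(params, indices)      # d(rows)/d(params) is an IndexedSlices
loss = tf.reduce_sum(rows)
dparams, = tf.gradients(loss, [params])
# dparams is a tf.IndexedSlices; with only one contributing gradient it is now
# passed through by _AggregatedGrads instead of being routed through the
# IndexedSlices aggregation branch.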