author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-04-23 15:15:25 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-04-23 15:17:58 -0700
commit    9e1d93d28fe30171de3f6838028eeadb44b0d6fd
tree      26d4d4020a4ebeb548f6bbb536cb67a5ba6b9ba5
parent    6f6c75a7673cd73dfbaaba3f259ce9ab5c8086a1
Changing tf.foldl and tf.foldr to accept multiple/nested tensors as element/initializer.
PiperOrigin-RevId: 193993295
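
As a quick illustration of what this change enables (a minimal sketch mirroring the new tests below; it assumes a TF 1.x graph-mode session and the public `tf.foldl` alias for `functional_ops.foldl`):

import numpy as np
import tensorflow as tf

# `elems` may now be a tuple of tensors; each is unpacked along dimension 0
# and the per-step slices arrive at `fn` as a matching tuple.
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = tf.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
             initializer=np.array(1.0))
with tf.Session() as sess:
    print(sess.run(r))  # 1.0 -- each pair of slices cancels, leaving the initializer.
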
-rw-r--r--  tensorflow/python/kernel_tests/functional_ops_test.py  |  40
-rw-r--r--  tensorflow/python/ops/functional_ops.py                | 100
2 files changed, 110 insertions(+), 30 deletions(-)
diff --git a/tensorflow/python/kernel_tests/functional_ops_test.py b/tensorflow/python/kernel_tests/functional_ops_test.py
index 34fb655035..5f48be94da 100644
--- a/tensorflow/python/kernel_tests/functional_ops_test.py
+++ b/tensorflow/python/kernel_tests/functional_ops_test.py
@@ -70,6 +70,26 @@ class FunctionalOpsTest(test.TestCase):
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
+ @test_util.run_in_graph_and_eager_modes()
+ def testFoldl_SingleInputMultiOutput(self):
+ with self.test_session():
+ elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ initializer = np.array([1, -1.0])
+ r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
+ r_value = self.evaluate(r)
+
+ self.assertAllEqual(22, r_value[0])
+ self.assertAllEqual(20, r_value[1])
+
+ @test_util.run_in_graph_and_eager_modes()
+ def testFoldl_MultiInputSingleOutput(self):
+ with self.test_session():
+ elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ initializer = np.array(1.0)
+ r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
+ initializer)
+ self.assertAllEqual(1, self.evaluate(r))
+
def testFoldl_Scoped(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root") as varscope:
@@ -105,6 +125,26 @@ class FunctionalOpsTest(test.TestCase):
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
+ @test_util.run_in_graph_and_eager_modes()
+ def testFoldr_SingleInputMultiOutput(self):
+ with self.test_session():
+ elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ initializer = np.array([1, -1.0])
+ r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
+ r_value = self.evaluate(r)
+
+ self.assertAllEqual(22, r_value[0])
+ self.assertAllEqual(20, r_value[1])
+
+ @test_util.run_in_graph_and_eager_modes()
+ def testFoldr_MultiInputSingleOutput(self):
+ with self.test_session():
+ elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ initializer = np.array(1.0)
+ r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
+ initializer)
+ self.assertAllEqual(1, self.evaluate(r))
+
def testFoldr_Scoped(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root") as varscope:
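
A plain-numpy check of the expected values in the tests above (not part of the patch): sum(elems) is 21, so folding addition into the initializer [1, -1] gives [22, 20], and in the multi-input tests each pair of slices cancels, leaving the initializer 1.

import numpy as np
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print(np.array([1.0, -1.0]) + elems.sum())  # [22. 20.]
print(1.0 + (elems + -elems).sum())         # 1.0
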
diff --git a/tensorflow/python/ops/functional_ops.py b/tensorflow/python/ops/functional_ops.py
index 161f6f3659..1b3a1e5cbc 100644
--- a/tensorflow/python/ops/functional_ops.py
+++ b/tensorflow/python/ops/functional_ops.py
@@ -65,10 +65,20 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
+ This method also allows multi-arity `elems` and output of `fn`. If `elems`
+ is a (possibly nested) list or tuple of tensors, then each of these tensors
+ must have a matching first (unpack) dimension. The second argument of `fn`
+ (the current slice) then matches the structure of `elems`. That is, if
+ `elems` is `(t1, [t2, t3, [t4, t5]])`, an appropriate signature for `fn` is
+ `fn = lambda acc, t: ...`, where `acc` is the accumulator and `t` has the
+ structure `(t1, [t2, t3, [t4, t5]])`.
+
Args:
fn: The callable to be performed.
- elems: A tensor to be unpacked on dimension 0.
- initializer: (optional) The initial value for the accumulator.
+ elems: A tensor or (possibly nested) sequence of tensors, each of which
+ will be unpacked along its first dimension. The nested sequence of the
+ resulting slices will be the second argument to `fn`.
+ initializer: (optional) A tensor or (possibly nested) sequence of tensors
+ to use as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
@@ -76,8 +86,9 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
name: (optional) Name prefix for the returned tensors.
Returns:
- A tensor resulting from applying `fn` consecutively to the list of tensors
- unpacked from `elems`, from first to last.
+ A tensor or (possibly nested) sequence of tensors, resulting from applying
+ `fn` consecutively to the list of tensors unpacked from `elems`, from first
+ to last.
Raises:
TypeError: if `fn` is not callable.
@@ -92,6 +103,11 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
if not callable(fn):
raise TypeError("fn must be callable.")
+ def create_ta(elem):
+ return tensor_array_ops.TensorArray(
+ dtype=elem.dtype, size=n, dynamic_size=False,
+ infer_shape=True).unstack(elem)
+
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldl", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
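
One subtlety worth noting in the hunk above: `create_ta` closes over `n`, which is only assigned further down in the enclosing scope. That is safe because Python resolves closure variables when the function is called, not when it is defined. A standalone illustration (hypothetical names, not from the patch):

def make_reader():
    def read():
        return n  # looked up at call time, not at definition time
    n = 41        # bound after `read` is defined; still visible to it
    return read

print(make_reader()())  # 41
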
@@ -107,24 +123,26 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
- # Convert elems to tensor array.
- elems = ops.convert_to_tensor(elems, name="elems")
- n = array_ops.shape(elems)[0]
- elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
- dynamic_size=False,
- infer_shape=True)
- elems_ta = elems_ta.unstack(elems)
+ # Convert elems to tensor array. n may be known statically.
+ elems_flat = [
+ ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
+ ]
+ n = elems_flat[0].shape[0].value or array_ops.shape(elems_flat[0])[0]
+
+ elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
- a = elems_ta.read(0)
+ a = nest.map_structure(lambda elem: elem.read(0), elems_ta)
i = constant_op.constant(1)
else:
- a = ops.convert_to_tensor(initializer)
+ a = initializer
i = constant_op.constant(0)
def compute(i, a):
- a = fn(a, elems_ta.read(i))
+ elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta)
+ a = fn(a, elem_i)
return [i + 1, a]
+
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n, compute, [i, a],
parallel_iterations=parallel_iterations,
@@ -135,6 +153,7 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
+
return r_a
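
The new `n = elems_flat[0].shape[0].value or array_ops.shape(elems_flat[0])[0]` line prefers a static Python int (TF 1.x `Dimension.value`, which is `None` when unknown) and falls back to a dynamic scalar tensor, so the loop bound can be constant-folded when the leading dimension is known at graph-construction time. A standalone sketch of the same pattern (hypothetical helper name; note the `or` idiom also falls through to the dynamic shape when the static dimension is 0):

import tensorflow as tf

def leading_dim(t):
    # Prefer the statically known leading dimension (a Python int);
    # fall back to a graph-time scalar when the shape is unknown.
    static_n = t.shape[0].value  # TF 1.x Dimension; None if unknown
    return static_n if static_n is not None else tf.shape(t)[0]
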
@@ -153,10 +172,20 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
+ This method also allows multi-arity `elems` and output of `fn`. If `elems`
+ is a (possibly nested) list or tuple of tensors, then each of these tensors
+ must have a matching first (unpack) dimension. The second argument of `fn`
+ (the current slice) then matches the structure of `elems`. That is, if
+ `elems` is `(t1, [t2, t3, [t4, t5]])`, an appropriate signature for `fn` is
+ `fn = lambda acc, t: ...`, where `acc` is the accumulator and `t` has the
+ structure `(t1, [t2, t3, [t4, t5]])`.
+
Args:
fn: The callable to be performed.
- elems: A tensor that is unpacked into a sequence of tensors to apply `fn`.
- initializer: (optional) The initial value for the accumulator.
+ elems: A tensor or (possibly nested) sequence of tensors, each of which
+ will be unpacked along its first dimension. The nested sequence of the
+ resulting slices will be the second argument to `fn`.
+ initializer: (optional) A tensor or (possibly nested) sequence of tensors
+ to use as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
@@ -164,8 +193,9 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
name: (optional) Name prefix for the returned tensors.
Returns:
- A tensor resulting from applying `fn` consecutively to the list of tensors
- unpacked from `elems`, from last to first.
+ A tensor or (possibly nested) sequence of tensors, resulting from applying
+ `fn` consecutively to the list of tensors unpacked from `elems`, from last
+ to first.
Raises:
TypeError: if `fn` is not callable.
@@ -180,6 +210,11 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
if not callable(fn):
raise TypeError("fn must be callable.")
+ def create_ta(elem):
+ return tensor_array_ops.TensorArray(
+ dtype=elem.dtype, size=n, dynamic_size=False,
+ infer_shape=True).unstack(elem)
+
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
@@ -195,26 +230,30 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
- # Convert elems to tensor array.
- elems = ops.convert_to_tensor(elems, name="elems")
- n = array_ops.shape(elems)[0]
- elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
- dynamic_size=False,
- infer_shape=True)
- elems_ta = elems_ta.unstack(elems)
+ # Convert elems to tensor array. n may be known statically.
+ elems_flat = [
+ ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
+ ]
+ n = elems_flat[0].shape[0].value or array_ops.shape(elems_flat[0])[0]
+
+ elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
- a = elems_ta.read(i)
+ a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
- a = ops.convert_to_tensor(initializer)
+ a = initializer
+
def compute(i, a):
i -= 1
- a = fn(a, elems_ta.read(i))
- return [i, a]
+ elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
+ a_out = fn(a, elem)
+ return [i, a_out]
+
_, r_a = control_flow_ops.while_loop(
- lambda i, a: i > 0, compute, [i, a],
+ lambda i, a: i > 0,
+ compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory)
@@ -223,6 +262,7 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
+
return r_a
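
And the foldr counterpart of the earlier sketch, mirroring testFoldr_SingleInputMultiOutput (again a TF 1.x graph-mode sketch, not part of the patch):

import numpy as np
import tensorflow as tf

# A vector initializer with scalar slices: each step broadcasts the
# scalar into the 2-element accumulator, so the result is sum + [1, -1].
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = tf.foldr(lambda a, x: a + x, elems, initializer=np.array([1.0, -1.0]))
with tf.Session() as sess:
    print(sess.run(r))  # [22. 20.] -- addition commutes, so fold order is irrelevant.
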