aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/python/ops/array_ops.py
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/python/ops/array_ops.py')
-rw-r--r--tensorflow/python/ops/array_ops.py26
1 file changed, 15 insertions, 11 deletions
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index b2aff617df..a8f596c7a3 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -402,14 +402,15 @@ def _SliceHelper(tensor, slice_spec, var=None):
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
- print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]]
- print(foo[:, tf.newaxis, :].eval()) # => [[[3,2,1]], [[9,8,7]]]
- print(foo[:, :, tf.newaxis].eval()) # => [[[3],[2],[1]], [[9],[8],[7]]]
+ print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
+ print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
+ print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]], [[7],[8],[9]]]
# Ellipses (3 equivalent operations)
- print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]]
- print(foo[tf.newaxis, ...].eval()) # => [[[3,2,1], [9,8,7]]]
- print(foo[tf.newaxis].eval()) # => [[[3,2,1], [9,8,7]]]
+ foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
+ print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
+ print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
+ print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
@@ -760,11 +761,14 @@ def parallel_stack(values, name="parallel_stack"):
parallel_stack([x, y, z]) # => [[1, 4], [2, 5], [3, 6]]
```
- The difference between stack and parallel_stack is that stack requires all
- of the inputs be computed before the operation will begin but doesn't require
- that the input shapes be known during graph construction. Parallel stack
- will copy pieces of the input into the output as they become available, in
- some situations this can provide a performance benefit.
+ The difference between `stack` and `parallel_stack` is that `stack` requires
+ all the inputs be computed before the operation will begin but doesn't require
+ that the input shapes be known during graph construction.
+
+ `parallel_stack` will copy pieces of the input into the output as they become
+ available; in some situations this can provide a performance benefit.
+
+ Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is