diff options
Diffstat (limited to 'tensorflow/python/ops/array_ops.py')
-rw-r--r-- | tensorflow/python/ops/array_ops.py | 30 |
1 file changed, 17 insertions, 13 deletions
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py index b2aff617df..8db5efb447 100644 --- a/tensorflow/python/ops/array_ops.py +++ b/tensorflow/python/ops/array_ops.py @@ -402,14 +402,15 @@ def _SliceHelper(tensor, slice_spec, var=None): # Insert another dimension foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) - print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]] - print(foo[:, tf.newaxis, :].eval()) # => [[[3,2,1]], [[9,8,7]]] - print(foo[:, :, tf.newaxis].eval()) # => [[[3],[2],[1]], [[9],[8],[7]]] + print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] + print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]] + print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]], [[7],[8],[9]]] # Ellipses (3 equivalent operations) - print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]] - print(foo[tf.newaxis, ...].eval()) # => [[[3,2,1], [9,8,7]]] - print(foo[tf.newaxis].eval()) # => [[[3,2,1], [9,8,7]]] + foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) + print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] + print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] + print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] ``` Notes: @@ -760,11 +761,14 @@ def parallel_stack(values, name="parallel_stack"): parallel_stack([x, y, z]) # => [[1, 4], [2, 5], [3, 6]] ``` - The difference between stack and parallel_stack is that stack requires all - of the inputs be computed before the operation will begin but doesn't require - that the input shapes be known during graph construction. Parallel stack - will copy pieces of the input into the output as they become available, in - some situations this can provide a performance benefit. 
+ The difference between `stack` and `parallel_stack` is that `stack` requires + all the inputs be computed before the operation will begin but doesn't require + that the input shapes be known during graph construction. + + `parallel_stack` will copy pieces of the input into the output as they become + available, in some situations this can provide a performance benefit. + + Unlike `stack`, `parallel_stack` does NOT support backpropagation. This is the opposite of unstack. The numpy equivalent is @@ -1369,7 +1373,7 @@ def zeros(shape, dtype=dtypes.float32, name=None): ``` Args: - shape: Either a list of integers, or a 1-D `Tensor` of type `int32`. + shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`. dtype: The type of an element in the resulting `Tensor`. name: A name for the operation (optional). @@ -1483,7 +1487,7 @@ def ones(shape, dtype=dtypes.float32, name=None): ``` Args: - shape: Either a list of integers, or a 1-D `Tensor` of type `int32`. + shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`. dtype: The type of an element in the resulting `Tensor`. name: A name for the operation (optional). |