From a92b15e311622adf5b8f98dd4f22934a21705c9c Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 30 Nov 2016 18:06:14 -0800 Subject: Update generated Python Op docs. Change: 140678314 --- tensorflow/g3doc/api_docs/python/array_ops.md | 23 +++++++++++----------- .../shard0/tf.reverse_sequence.md | 14 ++++++------- .../functions_and_classes/shard1/tf.reduce_join.md | 8 ++++---- .../functions_and_classes/shard2/tf.split.md | 9 +++++---- .../shard3/tf.sparse_concat.md | 9 +++++---- .../shard3/tf.sparse_split.md | 14 +++++++------ .../shard7/tf.train.SummaryWriter.md | 2 +- tensorflow/g3doc/api_docs/python/sparse_ops.md | 23 ++++++++++++---------- tensorflow/g3doc/api_docs/python/string_ops.md | 8 ++++---- tensorflow/g3doc/api_docs/python/train.md | 2 +- 10 files changed, 60 insertions(+), 52 deletions(-) diff --git a/tensorflow/g3doc/api_docs/python/array_ops.md b/tensorflow/g3doc/api_docs/python/array_ops.md index adbd947a9f..5a3d73ae49 100644 --- a/tensorflow/g3doc/api_docs/python/array_ops.md +++ b/tensorflow/g3doc/api_docs/python/array_ops.md @@ -724,12 +724,12 @@ tf.strided_slice(input, [1, 1, 0], [2, -1, 3], [1, -1, 1]) ==>[[[4, 4, 4], - - - -### `tf.split(split_dim, num_split, value, name='split')` {#split} +### `tf.split(axis, num_split, value, name='split', split_dim=None)` {#split} Splits a tensor into `num_split` tensors along one dimension. -Splits `value` along dimension `split_dim` into `num_split` smaller tensors. -Requires that `num_split` evenly divide `value.shape[split_dim]`. +Splits `value` along dimension `axis` into `num_split` smaller tensors. +Requires that `num_split` evenly divide `value.shape[axis]`. For example: @@ -757,11 +757,12 @@ tf.unpack(t, axis=axis) ##### Args: -* `split_dim`: A 0-D `int32` `Tensor`. The dimension along which to split. +* `axis`: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in the range `[0, rank(value))`. * `num_split`: A Python integer. The number of ways to split. * `value`: The `Tensor` to split. * `name`: A name for the operation (optional). +* `split_dim`: The old (deprecated) name for axis. ##### Returns: @@ -1216,20 +1217,20 @@ This is the opposite of pack. The numpy equivalent is - - - -### `tf.reverse_sequence(input, seq_lengths, seq_dim, batch_dim=None, name=None)` {#reverse_sequence} +### `tf.reverse_sequence(input, seq_lengths, seq_axis=None, batch_axis=None, name=None, seq_dim=None, batch_dim=None)` {#reverse_sequence} Reverses variable length slices. -This op first slices `input` along the dimension `batch_dim`, and for each +This op first slices `input` along the dimension `batch_axis`, and for each slice `i`, reverses the first `seq_lengths[i]` elements along -the dimension `seq_dim`. +the dimension `seq_axis`. The elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`, and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. -The output slice `i` along dimension `batch_dim` is then given by input +The output slice `i` along dimension `batch_axis` is then given by input slice `i`, with the first `seq_lengths[i]` slices along dimension -`seq_dim` reversed. +`seq_axis` reversed. For example: @@ -1282,8 +1283,8 @@ output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * `seq_lengths`: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)` -* `seq_dim`: An `int`. The dimension which is partially reversed. -* `batch_dim`: An optional `int`. Defaults to `0`. 
+* `seq_axis`: An `int`. The dimension which is partially reversed. +* `batch_axis`: An optional `int`. Defaults to `0`. The dimension along which reversal is performed. * `name`: A name for the operation (optional). diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.reverse_sequence.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.reverse_sequence.md index 03dd068320..b950cd5fe6 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.reverse_sequence.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.reverse_sequence.md @@ -1,17 +1,17 @@ -### `tf.reverse_sequence(input, seq_lengths, seq_dim, batch_dim=None, name=None)` {#reverse_sequence} +### `tf.reverse_sequence(input, seq_lengths, seq_axis=None, batch_axis=None, name=None, seq_dim=None, batch_dim=None)` {#reverse_sequence} Reverses variable length slices. -This op first slices `input` along the dimension `batch_dim`, and for each +This op first slices `input` along the dimension `batch_axis`, and for each slice `i`, reverses the first `seq_lengths[i]` elements along -the dimension `seq_dim`. +the dimension `seq_axis`. The elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`, and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. -The output slice `i` along dimension `batch_dim` is then given by input +The output slice `i` along dimension `batch_axis` is then given by input slice `i`, with the first `seq_lengths[i]` slices along dimension -`seq_dim` reversed. +`seq_axis` reversed. For example: @@ -64,8 +64,8 @@ output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * `seq_lengths`: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)` -* `seq_dim`: An `int`. The dimension which is partially reversed. -* `batch_dim`: An optional `int`. Defaults to `0`. +* `seq_axis`: An `int`. The dimension which is partially reversed. +* `batch_axis`: An optional `int`. Defaults to `0`. The dimension along which reversal is performed. * `name`: A name for the operation (optional). diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.reduce_join.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.reduce_join.md index ad49e98274..a93d8208ff 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.reduce_join.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.reduce_join.md @@ -1,4 +1,4 @@ -### `tf.reduce_join(inputs, reduction_indices, keep_dims=None, separator=None, name=None)` {#reduce_join} +### `tf.reduce_join(inputs, axis=None, keep_dims=False, separator='', name=None, reduction_indices=None)` {#reduce_join} Joins a string Tensor across the given dimensions. @@ -6,7 +6,7 @@ Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. Passing -an empty `reduction_indices` joins all strings in linear index order and outputs +an empty `axis` joins all strings in linear index order and outputs a scalar string. @@ -31,9 +31,9 @@ tf.reduce_join(a, []) ==> ["abcd"] * `inputs`: A `Tensor` of type `string`. The input to be joined. All reduced indices must have non-zero size. 
-* `reduction_indices`: A `Tensor` of type `int32`. +* `axis`: A `Tensor` of type `int32`. The dimensions to reduce over. Dimensions are reduced in the - order specified. Omitting `reduction_indices` is equivalent to passing + order specified. Omitting `axis` is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. * `keep_dims`: An optional `bool`. Defaults to `False`. If `True`, retain reduced dimensions with length `1`. diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.split.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.split.md index b6bfac36d4..23bc8386c2 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.split.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.split.md @@ -1,9 +1,9 @@ -### `tf.split(split_dim, num_split, value, name='split')` {#split} +### `tf.split(axis, num_split, value, name='split', split_dim=None)` {#split} Splits a tensor into `num_split` tensors along one dimension. -Splits `value` along dimension `split_dim` into `num_split` smaller tensors. -Requires that `num_split` evenly divide `value.shape[split_dim]`. +Splits `value` along dimension `axis` into `num_split` smaller tensors. +Requires that `num_split` evenly divide `value.shape[axis]`. For example: @@ -31,11 +31,12 @@ tf.unpack(t, axis=axis) ##### Args: -* `split_dim`: A 0-D `int32` `Tensor`. The dimension along which to split. +* `axis`: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in the range `[0, rank(value))`. * `num_split`: A Python integer. The number of ways to split. * `value`: The `Tensor` to split. * `name`: A name for the operation (optional). +* `split_dim`: The old (deprecated) name for axis. ##### Returns: diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_concat.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_concat.md index 618f1f0fef..70cab998b4 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_concat.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_concat.md @@ -1,4 +1,4 @@ -### `tf.sparse_concat(concat_dim, sp_inputs, name=None, expand_nonconcat_dim=False)` {#sparse_concat} +### `tf.sparse_concat(axis, sp_inputs, name=None, expand_nonconcat_dim=False, concat_dim=None)` {#sparse_concat} Concatenates a list of `SparseTensor` along the specified dimension. @@ -27,7 +27,7 @@ This op runs in `O(M log M)` time, where `M` is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension. -For example, if `concat_dim = 1` and the inputs are +For example, if `axis = 1` and the inputs are sp_inputs[0]: shape = [2, 3] [0, 2]: "a" @@ -52,7 +52,7 @@ Graphically this is equivalent to doing [ a] concat [ d e ] = [ a d e ] [b c ] [ ] [b c ] -Another example, if 'concat_dim = 1' and the inputs are +Another example, if 'axis = 1' and the inputs are sp_inputs[0]: shape = [3, 3] [0, 2]: "a" @@ -83,12 +83,13 @@ Graphically this is equivalent to doing ##### Args: -* `concat_dim`: Dimension to concatenate along. Must be in range [-rank, rank), +* `axis`: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input `SparseTensor`. * `sp_inputs`: List of `SparseTensor` to concatenate. * `name`: A name prefix for the returned tensors (optional). 
* `expand_nonconcat_dim`: Whether to allow the expansion in the non-concat dimensions. Defaulted to False. +* `concat_dim`: The old (deprecated) name for axis. ##### Returns: diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_split.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_split.md index e3e608a9e2..a92e79f53b 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_split.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.sparse_split.md @@ -1,10 +1,10 @@ -### `tf.sparse_split(split_dim, num_split, sp_input, name=None)` {#sparse_split} +### `tf.sparse_split(axis, num_split, sp_input, name=None, split_dim=None)` {#sparse_split} -Split a `SparseTensor` into `num_split` tensors along `split_dim`. +Split a `SparseTensor` into `num_split` tensors along `axis`. -If the `sp_input.shape[split_dim]` is not an integer multiple of `num_split` -each slice starting from 0:`shape[split_dim] % num_split` gets extra one -dimension. For example, if `split_dim = 1` and `num_split = 2` and the +If the `sp_input.shape[axis]` is not an integer multiple of `num_split` +each slice starting from 0:`shape[axis] % num_split` gets extra one +dimension. For example, if `axis = 1` and `num_split = 2` and the input is: input_tensor = shape = [2, 7] @@ -24,10 +24,11 @@ Graphically the output tensors are: ##### Args: -* `split_dim`: A 0-D `int32` `Tensor`. The dimension along which to split. +* `axis`: A 0-D `int32` `Tensor`. The dimension along which to split. * `num_split`: A Python integer. The number of ways to split. * `sp_input`: The `SparseTensor` to split. * `name`: A name for the operation (optional). +* `split_dim`: Deprecated old name for axis. ##### Returns: @@ -37,4 +38,5 @@ Graphically the output tensors are: * `TypeError`: If `sp_input` is not a `SparseTensor`. +* `ValueError`: If the deprecated `split_dim` and `axis` are both non None. diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md index e9bdda200f..8abe6fba48 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md @@ -28,7 +28,7 @@ Please switch to tf.summary.FileWriter. The interface and behavior is the same; # Launch the graph in a session. sess = tf.Session() # Create a summary writer, add the 'graph' to the event file. - writer = tf.train.SummaryWriter(, sess.graph) + writer = tf.summary.FileWriter(, sess.graph) ``` The other arguments to the constructor control the asynchronous writes to diff --git a/tensorflow/g3doc/api_docs/python/sparse_ops.md b/tensorflow/g3doc/api_docs/python/sparse_ops.md index 2590b21fcc..433a5cd4eb 100644 --- a/tensorflow/g3doc/api_docs/python/sparse_ops.md +++ b/tensorflow/g3doc/api_docs/python/sparse_ops.md @@ -566,7 +566,7 @@ equal to: - - - -### `tf.sparse_concat(concat_dim, sp_inputs, name=None, expand_nonconcat_dim=False)` {#sparse_concat} +### `tf.sparse_concat(axis, sp_inputs, name=None, expand_nonconcat_dim=False, concat_dim=None)` {#sparse_concat} Concatenates a list of `SparseTensor` along the specified dimension. @@ -595,7 +595,7 @@ This op runs in `O(M log M)` time, where `M` is the total number of non-empty values across all inputs. 
This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension. -For example, if `concat_dim = 1` and the inputs are +For example, if `axis = 1` and the inputs are sp_inputs[0]: shape = [2, 3] [0, 2]: "a" @@ -620,7 +620,7 @@ Graphically this is equivalent to doing [ a] concat [ d e ] = [ a d e ] [b c ] [ ] [b c ] -Another example, if 'concat_dim = 1' and the inputs are +Another example, if 'axis = 1' and the inputs are sp_inputs[0]: shape = [3, 3] [0, 2]: "a" @@ -651,12 +651,13 @@ Graphically this is equivalent to doing ##### Args: -* `concat_dim`: Dimension to concatenate along. Must be in range [-rank, rank), +* `axis`: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input `SparseTensor`. * `sp_inputs`: List of `SparseTensor` to concatenate. * `name`: A name prefix for the returned tensors (optional). * `expand_nonconcat_dim`: Whether to allow the expansion in the non-concat dimensions. Defaulted to False. +* `concat_dim`: The old (deprecated) name for axis. ##### Returns: @@ -768,13 +769,13 @@ shape `[9, 4]` and `indices` / `values`: - - - -### `tf.sparse_split(split_dim, num_split, sp_input, name=None)` {#sparse_split} +### `tf.sparse_split(axis, num_split, sp_input, name=None, split_dim=None)` {#sparse_split} -Split a `SparseTensor` into `num_split` tensors along `split_dim`. +Split a `SparseTensor` into `num_split` tensors along `axis`. -If the `sp_input.shape[split_dim]` is not an integer multiple of `num_split` -each slice starting from 0:`shape[split_dim] % num_split` gets extra one -dimension. For example, if `split_dim = 1` and `num_split = 2` and the +If the `sp_input.shape[axis]` is not an integer multiple of `num_split` +each slice starting from 0:`shape[axis] % num_split` gets extra one +dimension. For example, if `axis = 1` and `num_split = 2` and the input is: input_tensor = shape = [2, 7] @@ -794,10 +795,11 @@ Graphically the output tensors are: ##### Args: -* `split_dim`: A 0-D `int32` `Tensor`. The dimension along which to split. +* `axis`: A 0-D `int32` `Tensor`. The dimension along which to split. * `num_split`: A Python integer. The number of ways to split. * `sp_input`: The `SparseTensor` to split. * `name`: A name for the operation (optional). +* `split_dim`: Deprecated old name for axis. ##### Returns: @@ -807,6 +809,7 @@ Graphically the output tensors are: * `TypeError`: If `sp_input` is not a `SparseTensor`. +* `ValueError`: If the deprecated `split_dim` and `axis` are both non None. - - - diff --git a/tensorflow/g3doc/api_docs/python/string_ops.md b/tensorflow/g3doc/api_docs/python/string_ops.md index 7e75148891..fc27ba36f0 100644 --- a/tensorflow/g3doc/api_docs/python/string_ops.md +++ b/tensorflow/g3doc/api_docs/python/string_ops.md @@ -105,7 +105,7 @@ string tensor. - - - -### `tf.reduce_join(inputs, reduction_indices, keep_dims=None, separator=None, name=None)` {#reduce_join} +### `tf.reduce_join(inputs, axis=None, keep_dims=False, separator='', name=None, reduction_indices=None)` {#reduce_join} Joins a string Tensor across the given dimensions. @@ -113,7 +113,7 @@ Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. 
Passing -an empty `reduction_indices` joins all strings in linear index order and outputs +an empty `axis` joins all strings in linear index order and outputs a scalar string. @@ -138,9 +138,9 @@ tf.reduce_join(a, []) ==> ["abcd"] * `inputs`: A `Tensor` of type `string`. The input to be joined. All reduced indices must have non-zero size. -* `reduction_indices`: A `Tensor` of type `int32`. +* `axis`: A `Tensor` of type `int32`. The dimensions to reduce over. Dimensions are reduced in the - order specified. Omitting `reduction_indices` is equivalent to passing + order specified. Omitting `axis` is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. * `keep_dims`: An optional `bool`. Defaults to `False`. If `True`, retain reduced dimensions with length `1`. diff --git a/tensorflow/g3doc/api_docs/python/train.md b/tensorflow/g3doc/api_docs/python/train.md index ece566e65f..8e3fc525fd 100644 --- a/tensorflow/g3doc/api_docs/python/train.md +++ b/tensorflow/g3doc/api_docs/python/train.md @@ -4154,7 +4154,7 @@ Please switch to tf.summary.FileWriter. The interface and behavior is the same; # Launch the graph in a session. sess = tf.Session() # Create a summary writer, add the 'graph' to the event file. - writer = tf.train.SummaryWriter(, sess.graph) + writer = tf.summary.FileWriter(, sess.graph) ``` The other arguments to the constructor control the asynchronous writes to -- cgit v1.2.3